From 09a4951478746ba0d95521b786439e58aeda179b Mon Sep 17 00:00:00 2001
From: Jorge Turrado Ferrero
Date: Mon, 12 Aug 2024 12:28:19 +0200
Subject: [PATCH] Prepare Release v2.15.1 (#6050)

* fix: JetStream scaler detects leader changes correctly (#6043)

Signed-off-by: Jorge Turrado

* bump deps & k8s to v0.29.7 (#6035)

Signed-off-by: Zbynek Roubalik
Co-authored-by: Jorge Turrado Ferrero
Signed-off-by: Jorge Turrado

* fix: Hashicorp Vault PKI doesn't fail due to KeyPair mismatch (#6029)

Signed-off-by: Jorge Turrado
Signed-off-by: Jorge Turrado

* update changelog

Signed-off-by: Jorge Turrado

* fix style

Signed-off-by: Jorge Turrado

---------

Signed-off-by: Jorge Turrado
Signed-off-by: Zbynek Roubalik
Signed-off-by: Jorge Turrado
Co-authored-by: Zbynek Roubalik
---
 CHANGELOG.md | 35 +-
 .../eventing.keda.sh_cloudeventsources.yaml | 2 +-
 ...keda.sh_clustertriggerauthentications.yaml | 2 +-
 config/crd/bases/keda.sh_scaledjobs.yaml | 2 +-
 config/crd/bases/keda.sh_scaledobjects.yaml | 2 +-
 .../bases/keda.sh_triggerauthentications.yaml | 2 +-
 go.mod | 268 +-
 go.sum | 1203 +--
 pkg/metricsservice/api/metrics.pb.go | 8 +-
 pkg/metricsservice/api/metrics_grpc.pb.go | 30 +-
 pkg/mock/mock_eventemitter/mock_interface.go | 3 +-
 .../authentication/authentication_helpers.go | 2 +-
 pkg/scalers/external_scaler.go | 6 +-
 pkg/scalers/external_scaler_test.go | 2 +-
 .../externalscaler/externalscaler.pb.go | 20 +-
 .../externalscaler/externalscaler_grpc.pb.go | 83 +-
 pkg/scalers/gcp/gcp_stackdriver_client.go | 2 +-
 pkg/scalers/liiklus/LiiklusService.pb.go | 36 +-
 pkg/scalers/liiklus/LiiklusService_grpc.pb.go | 133 +-
 pkg/scalers/liiklus_scaler.go | 2 +-
 pkg/scalers/nats_jetstream_scaler.go | 14 -
 pkg/scalers/nats_jetstream_scaler_test.go | 16 -
 pkg/scalers/predictkube_scaler.go | 2 +-
 .../resolver/gcp_secretmanager_handler.go | 2 +-
 .../resolver/hashicorpvault_handler.go | 6 +-
 tests/scalers/mssql/mssql_test.go | 6 +-
 .../nats_jetstream/helper/nats_helper.go | 22 +
 .../nats_jetstream_cluster_test.go | 4 +
 vendor/cloud.google.com/go/auth/CHANGES.md | 88 +
 vendor/cloud.google.com/go/auth/auth.go | 167 +-
 .../go/auth/credentials/compute.go | 11 +-
 .../go/auth/credentials/detect.go | 20 +-
 .../go/auth/credentials/filetypes.go | 2 +
 .../internal/externalaccount/aws_provider.go | 59 +-
 .../internal/externalaccount/url_provider.go | 16 +-
 .../go/auth/credentials/internal/gdch/gdch.go | 10 +-
 .../internal/impersonate/impersonate.go | 7 +-
 .../internal/stsexchange/sts_exchange.go | 8 +-
 .../go/auth/grpctransport/directpath.go | 4 +-
 .../go/auth/grpctransport/grpctransport.go | 85 +-
 .../go/auth/httptransport/httptransport.go | 20 +-
 .../go/auth/httptransport/transport.go | 16 +-
 .../go/auth/internal/internal.go | 21 +-
 .../go/auth/internal/transport/cba.go | 14 +-
 .../internal/transport/cert/default_cert.go | 9 +-
 .../internal/transport/cert/workload_cert.go | 117 +
 .../go/auth/internal/transport/transport.go | 31 +-
 .../go/auth/oauth2adapt/CHANGES.md | 7 +
 .../cloud.google.com/go/auth/threelegged.go | 19 +-
 .../go/compute/metadata/CHANGES.md | 19 +
 .../go/compute/metadata/metadata.go | 381 +-
 .../go/compute/metadata/syscheck.go | 26 +
 .../go/compute/metadata/syscheck_linux.go | 28 +
 .../go/compute/metadata/syscheck_windows.go | 38 +
 vendor/cloud.google.com/go/iam/CHANGES.md | 35 +
 .../go/iam/apiv1/iampb/iam_policy.pb.go | 16 +-
 .../go/iam/apiv1/iampb/options.pb.go | 10 +-
 .../go/iam/apiv1/iampb/policy.pb.go | 22 +-
 .../go/internal/.repo-metadata-full.json | 114 +-
 .../cloud.google.com/go/internal/gen_info.sh | 46 +
.../go/internal/trace/trace.go | 92 +- .../apiv3/v2/alert_policy_client.go | 5 +- .../go/monitoring/apiv3/v2/group_client.go | 5 +- .../go/monitoring/apiv3/v2/metric_client.go | 5 +- .../apiv3/v2/monitoringpb/alert.pb.go | 856 +- .../apiv3/v2/monitoringpb/alert_service.pb.go | 20 +- .../apiv3/v2/monitoringpb/common.pb.go | 32 +- .../v2/monitoringpb/dropped_labels.pb.go | 10 +- .../apiv3/v2/monitoringpb/group.pb.go | 10 +- .../apiv3/v2/monitoringpb/group_service.pb.go | 26 +- .../apiv3/v2/monitoringpb/metric.pb.go | 30 +- .../v2/monitoringpb/metric_service.pb.go | 42 +- .../v2/monitoringpb/mutation_record.pb.go | 10 +- .../apiv3/v2/monitoringpb/notification.pb.go | 12 +- .../monitoringpb/notification_service.pb.go | 34 +- .../apiv3/v2/monitoringpb/query_service.pb.go | 8 +- .../apiv3/v2/monitoringpb/service.pb.go | 1377 ++- .../v2/monitoringpb/service_service.pb.go | 32 +- .../apiv3/v2/monitoringpb/snooze.pb.go | 12 +- .../v2/monitoringpb/snooze_service.pb.go | 18 +- .../apiv3/v2/monitoringpb/span_context.pb.go | 10 +- .../apiv3/v2/monitoringpb/uptime.pb.go | 977 +- .../v2/monitoringpb/uptime_service.pb.go | 24 +- .../apiv3/v2/notification_channel_client.go | 5 +- .../go/monitoring/apiv3/v2/query_client.go | 5 +- .../apiv3/v2/service_monitoring_client.go | 5 +- .../go/monitoring/apiv3/v2/snooze_client.go | 5 +- .../apiv3/v2/uptime_check_client.go | 5 +- .../go/monitoring/internal/version.go | 2 +- .../go/secretmanager/apiv1/auxiliary.go | 48 + .../secretmanager/apiv1/gapic_metadata.json | 20 + .../apiv1/secret_manager_client.go | 334 +- .../apiv1/secretmanagerpb/resources.pb.go | 553 +- .../apiv1/secretmanagerpb/service.pb.go | 741 +- .../go/secretmanager/internal/version.go | 2 +- vendor/cloud.google.com/go/storage/CHANGES.md | 64 + vendor/cloud.google.com/go/storage/acl.go | 11 - vendor/cloud.google.com/go/storage/bucket.go | 151 + vendor/cloud.google.com/go/storage/client.go | 29 +- vendor/cloud.google.com/go/storage/doc.go | 10 +- .../go/storage/grpc_client.go | 73 +- vendor/cloud.google.com/go/storage/hmac.go | 1 - .../go/storage/http_client.go | 83 +- .../storage/internal/apiv2/storage_client.go | 4 +- .../internal/apiv2/storagepb/storage.pb.go | 210 +- .../go/storage/internal/version.go | 2 +- vendor/cloud.google.com/go/storage/invoke.go | 32 +- .../go/storage/notifications.go | 2 +- vendor/cloud.google.com/go/storage/option.go | 19 +- vendor/cloud.google.com/go/storage/reader.go | 12 + vendor/cloud.google.com/go/storage/storage.go | 90 +- .../kusto/internal/version/version.go | 2 +- .../Azure/azure-kusto-go/kusto/kcsb.go | 99 +- .../well_known_kusto_endpoints.json | 4 +- .../azure-sdk-for-go/sdk/azcore/CHANGELOG.md | 21 + .../internal/resource/resource_identifier.go | 2 +- .../sdk/azcore/arm/runtime/pipeline.go | 8 +- .../sdk/azcore/internal/exported/request.go | 37 + .../azcore/internal/pollers/async/async.go | 2 +- .../sdk/azcore/internal/pollers/body/body.go | 2 +- .../sdk/azcore/internal/pollers/fake/fake.go | 2 +- .../sdk/azcore/internal/pollers/loc/loc.go | 2 +- .../sdk/azcore/internal/pollers/op/op.go | 11 +- .../sdk/azcore/internal/pollers/util.go | 14 +- .../sdk/azcore/internal/shared/constants.go | 2 +- .../sdk/azcore/runtime/pager.go | 13 +- .../sdk/azcore/runtime/request.go | 16 + .../sdk/messaging/azservicebus/CHANGELOG.md | 6 + .../azservicebus/internal/conn/conn.go | 7 +- .../azservicebus/internal/constants.go | 2 +- .../sdk/storage/azblob/CHANGELOG.md | 15 + .../sdk/storage/azblob/README.md | 3 + .../sdk/storage/azblob/appendblob/client.go | 4 +- 
.../sdk/storage/azblob/assets.json | 2 +- .../sdk/storage/azblob/blob/client.go | 5 +- .../sdk/storage/azblob/blob/retry_reader.go | 1 - .../sdk/storage/azblob/blockblob/client.go | 4 +- .../azblob/internal/exported/version.go | 2 +- .../azblob/internal/generated/autorest.md | 8 +- .../azblob/internal/generated/constants.go | 2 +- .../generated/zz_appendblob_client.go | 239 +- .../internal/generated/zz_blob_client.go | 719 +- .../internal/generated/zz_blockblob_client.go | 365 +- .../azblob/internal/generated/zz_constants.go | 3 - .../internal/generated/zz_container_client.go | 203 +- .../azblob/internal/generated/zz_models.go | 3 - .../internal/generated/zz_models_serde.go | 75 +- .../azblob/internal/generated/zz_options.go | 3 - .../internal/generated/zz_pageblob_client.go | 457 +- .../{zz_response_types.go => zz_responses.go} | 3 - .../internal/generated/zz_service_client.go | 61 +- .../internal/generated/zz_time_rfc1123.go | 10 +- .../internal/generated/zz_time_rfc3339.go | 46 +- .../internal/generated/zz_xml_helper.go | 3 - .../azblob/internal/shared/batch_transfer.go | 1 - .../sdk/storage/azblob/pageblob/client.go | 4 +- .../sdk/storage/azblob/sas/service.go | 4 +- .../sdk/storage/azblob/sas/url_parts.go | 2 +- .../go-autorest/autorest/azure/auth/auth.go | 10 +- vendor/github.com/IBM/sarama/Dockerfile.kafka | 2 +- .../github.com/IBM/sarama/docker-compose.yml | 9 + .../aws/accountid_endpoint_mode.go | 18 + .../aws/aws-sdk-go-v2/aws/config.go | 3 + .../aws/aws-sdk-go-v2/aws/credentials.go | 3 + .../aws/aws-sdk-go-v2/aws/endpoints.go | 26 +- .../aws-sdk-go-v2/aws/go_module_metadata.go | 2 +- .../aws/middleware/private/metrics/metrics.go | 5 +- .../aws/middleware/user_agent.go | 44 + .../aws/protocol/eventstream/CHANGELOG.md | 4 + .../eventstream/go_module_metadata.go | 2 +- .../aws/aws-sdk-go-v2/aws/retry/middleware.go | 45 +- .../aws/retry/retryable_error.go | 21 + .../aws/signer/internal/v4/headers.go | 1 - .../aws-sdk-go-v2/aws/signer/v4/middleware.go | 29 - .../aws/aws-sdk-go-v2/aws/signer/v4/v4.go | 61 +- .../aws/aws-sdk-go-v2/config/CHANGELOG.md | 65 + .../aws/aws-sdk-go-v2/config/config.go | 3 + .../aws/aws-sdk-go-v2/config/env_config.go | 37 + .../config/go_module_metadata.go | 2 +- .../aws/aws-sdk-go-v2/config/load_options.go | 29 +- .../aws/aws-sdk-go-v2/config/provider.go | 17 + .../aws/aws-sdk-go-v2/config/resolve.go | 16 + .../aws/aws-sdk-go-v2/config/shared_config.go | 34 + .../aws-sdk-go-v2/credentials/CHANGELOG.md | 64 + .../endpointcreds/internal/client/client.go | 1 + .../credentials/endpointcreds/provider.go | 1 + .../credentials/go_module_metadata.go | 2 +- .../credentials/processcreds/provider.go | 4 + .../ssocreds/sso_credentials_provider.go | 1 + .../stscreds/assume_role_provider.go | 6 + .../stscreds/web_identity_provider.go | 19 + .../feature/ec2/imds/CHANGELOG.md | 40 + .../feature/ec2/imds/go_module_metadata.go | 2 +- .../internal/auth/smithy/v4signer_adapter.go | 6 +- .../internal/awsutil/path_value.go | 225 - .../internal/configsources/CHANGELOG.md | 40 + .../configsources/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/internal/context/context.go | 52 + .../endpoints/awsrulesfn/partition.go | 11 +- .../endpoints/awsrulesfn/partitions.go | 94 +- .../endpoints/awsrulesfn/partitions.json | 6 +- .../internal/endpoints/v2/CHANGELOG.md | 40 + .../endpoints/v2/go_module_metadata.go | 2 +- .../internal/middleware/middleware.go | 42 + .../aws-sdk-go-v2/service/amp/CHANGELOG.md | 54 + .../aws-sdk-go-v2/service/amp/api_client.go | 108 +- 
.../api_op_CreateAlertManagerDefinition.go | 14 +- .../amp/api_op_CreateLoggingConfiguration.go | 6 + .../amp/api_op_CreateRuleGroupsNamespace.go | 24 +- .../service/amp/api_op_CreateScraper.go | 39 +- .../service/amp/api_op_CreateWorkspace.go | 17 +- .../api_op_DeleteAlertManagerDefinition.go | 6 + .../amp/api_op_DeleteLoggingConfiguration.go | 6 + .../amp/api_op_DeleteRuleGroupsNamespace.go | 6 + .../service/amp/api_op_DeleteScraper.go | 6 + .../service/amp/api_op_DeleteWorkspace.go | 13 +- .../api_op_DescribeAlertManagerDefinition.go | 6 + .../api_op_DescribeLoggingConfiguration.go | 6 + .../amp/api_op_DescribeRuleGroupsNamespace.go | 6 + .../service/amp/api_op_DescribeScraper.go | 62 +- .../service/amp/api_op_DescribeWorkspace.go | 62 +- .../api_op_GetDefaultScraperConfiguration.go | 13 +- .../amp/api_op_ListRuleGroupsNamespaces.go | 35 +- .../service/amp/api_op_ListScrapers.go | 54 +- .../service/amp/api_op_ListTagsForResource.go | 6 + .../service/amp/api_op_ListWorkspaces.go | 42 +- .../amp/api_op_PutAlertManagerDefinition.go | 14 +- .../amp/api_op_PutRuleGroupsNamespace.go | 25 +- .../service/amp/api_op_TagResource.go | 21 +- .../service/amp/api_op_UntagResource.go | 6 + .../amp/api_op_UpdateLoggingConfiguration.go | 6 + .../amp/api_op_UpdateWorkspaceAlias.go | 13 +- .../aws/aws-sdk-go-v2/service/amp/auth.go | 8 +- .../service/amp/deserializers.go | 9 + .../aws/aws-sdk-go-v2/service/amp/doc.go | 18 +- .../aws-sdk-go-v2/service/amp/endpoints.go | 19 +- .../service/amp/go_module_metadata.go | 2 +- .../aws/aws-sdk-go-v2/service/amp/options.go | 34 +- .../aws-sdk-go-v2/service/amp/types/enums.go | 26 +- .../aws-sdk-go-v2/service/amp/types/types.go | 19 +- .../service/cloudwatch/CHANGELOG.md | 54 + .../service/cloudwatch/api_client.go | 102 + .../service/cloudwatch/api_op_DeleteAlarms.go | 41 +- .../api_op_DeleteAnomalyDetector.go | 42 +- .../cloudwatch/api_op_DeleteDashboards.go | 6 + .../cloudwatch/api_op_DeleteInsightRules.go | 18 +- .../cloudwatch/api_op_DeleteMetricStream.go | 6 + .../cloudwatch/api_op_DescribeAlarmHistory.go | 37 +- .../cloudwatch/api_op_DescribeAlarms.go | 289 +- .../api_op_DescribeAlarmsForMetric.go | 15 +- .../api_op_DescribeAnomalyDetectors.go | 37 +- .../cloudwatch/api_op_DescribeInsightRules.go | 34 +- .../cloudwatch/api_op_DisableAlarmActions.go | 6 + .../cloudwatch/api_op_DisableInsightRules.go | 11 +- .../cloudwatch/api_op_EnableAlarmActions.go | 6 + .../cloudwatch/api_op_EnableInsightRules.go | 11 +- .../service/cloudwatch/api_op_GetDashboard.go | 20 +- .../cloudwatch/api_op_GetInsightRuleReport.go | 61 +- .../cloudwatch/api_op_GetMetricData.go | 155 +- .../cloudwatch/api_op_GetMetricStatistics.go | 100 +- .../cloudwatch/api_op_GetMetricStream.go | 16 +- .../cloudwatch/api_op_GetMetricWidgetImage.go | 36 +- .../cloudwatch/api_op_ListDashboards.go | 34 +- .../api_op_ListManagedInsightRules.go | 40 +- .../cloudwatch/api_op_ListMetricStreams.go | 25 +- .../service/cloudwatch/api_op_ListMetrics.go | 74 +- .../cloudwatch/api_op_ListTagsForResource.go | 22 +- .../cloudwatch/api_op_PutAnomalyDetector.go | 50 +- .../cloudwatch/api_op_PutCompositeAlarm.go | 183 +- .../service/cloudwatch/api_op_PutDashboard.go | 44 +- .../cloudwatch/api_op_PutInsightRule.go | 41 +- .../api_op_PutManagedInsightRules.go | 27 +- .../cloudwatch/api_op_PutMetricAlarm.go | 325 +- .../cloudwatch/api_op_PutMetricData.go | 73 +- .../cloudwatch/api_op_PutMetricStream.go | 104 +- .../cloudwatch/api_op_SetAlarmState.go | 37 +- .../cloudwatch/api_op_StartMetricStreams.go | 15 +- 
.../cloudwatch/api_op_StopMetricStreams.go | 15 +- .../service/cloudwatch/api_op_TagResource.go | 50 +- .../cloudwatch/api_op_UntagResource.go | 22 +- .../aws-sdk-go-v2/service/cloudwatch/auth.go | 8 +- .../service/cloudwatch/deserializers.go | 8 + .../aws-sdk-go-v2/service/cloudwatch/doc.go | 23 +- .../service/cloudwatch/endpoints.go | 19 +- .../service/cloudwatch/go_module_metadata.go | 2 +- .../service/cloudwatch/options.go | 34 +- .../service/cloudwatch/types/enums.go | 62 +- .../service/cloudwatch/types/types.go | 394 +- .../service/dynamodb/CHANGELOG.md | 74 + .../service/dynamodb/api_client.go | 108 +- .../dynamodb/api_op_BatchExecuteStatement.go | 33 +- .../service/dynamodb/api_op_BatchGetItem.go | 178 +- .../service/dynamodb/api_op_BatchWriteItem.go | 169 +- .../service/dynamodb/api_op_CreateBackup.go | 46 +- .../dynamodb/api_op_CreateGlobalTable.go | 50 +- .../service/dynamodb/api_op_CreateTable.go | 150 +- .../service/dynamodb/api_op_DeleteBackup.go | 11 +- .../service/dynamodb/api_op_DeleteItem.go | 160 +- .../dynamodb/api_op_DeleteResourcePolicy.go | 32 +- .../service/dynamodb/api_op_DeleteTable.go | 33 +- .../service/dynamodb/api_op_DescribeBackup.go | 11 +- .../api_op_DescribeContinuousBackups.go | 28 +- .../api_op_DescribeContributorInsights.go | 15 +- .../dynamodb/api_op_DescribeEndpoints.go | 11 +- .../service/dynamodb/api_op_DescribeExport.go | 6 + .../dynamodb/api_op_DescribeGlobalTable.go | 28 +- .../api_op_DescribeGlobalTableSettings.go | 28 +- .../service/dynamodb/api_op_DescribeImport.go | 10 +- ..._op_DescribeKinesisStreamingDestination.go | 6 + .../service/dynamodb/api_op_DescribeLimits.go | 66 +- .../service/dynamodb/api_op_DescribeTable.go | 163 +- .../api_op_DescribeTableReplicaAutoScaling.go | 11 +- .../dynamodb/api_op_DescribeTimeToLive.go | 6 + ...i_op_DisableKinesisStreamingDestination.go | 6 + ...pi_op_EnableKinesisStreamingDestination.go | 6 + .../dynamodb/api_op_ExecuteStatement.go | 46 +- .../dynamodb/api_op_ExecuteTransaction.go | 28 +- .../api_op_ExportTableToPointInTime.go | 29 +- .../service/dynamodb/api_op_GetItem.go | 88 +- .../dynamodb/api_op_GetResourcePolicy.go | 37 +- .../service/dynamodb/api_op_ImportTable.go | 33 +- .../service/dynamodb/api_op_ListBackups.go | 46 +- .../api_op_ListContributorInsights.go | 25 +- .../service/dynamodb/api_op_ListExports.go | 23 +- .../dynamodb/api_op_ListGlobalTables.go | 38 +- .../service/dynamodb/api_op_ListImports.go | 35 +- .../service/dynamodb/api_op_ListTables.go | 125 +- .../dynamodb/api_op_ListTagsOfResource.go | 15 +- .../service/dynamodb/api_op_PutItem.go | 204 +- .../dynamodb/api_op_PutResourcePolicy.go | 70 +- .../service/dynamodb/api_op_Query.go | 492 +- .../dynamodb/api_op_RestoreTableFromBackup.go | 25 +- .../api_op_RestoreTableToPointInTime.go | 40 +- .../service/dynamodb/api_op_Scan.go | 438 +- .../service/dynamodb/api_op_TagResource.go | 15 +- .../dynamodb/api_op_TransactGetItems.go | 25 +- .../dynamodb/api_op_TransactWriteItems.go | 50 +- .../service/dynamodb/api_op_UntagResource.go | 15 +- .../api_op_UpdateContinuousBackups.go | 19 +- .../api_op_UpdateContributorInsights.go | 6 + .../dynamodb/api_op_UpdateGlobalTable.go | 49 +- .../api_op_UpdateGlobalTableSettings.go | 39 +- .../service/dynamodb/api_op_UpdateItem.go | 248 +- ...pi_op_UpdateKinesisStreamingDestination.go | 6 + .../service/dynamodb/api_op_UpdateTable.go | 58 +- .../api_op_UpdateTableReplicaAutoScaling.go | 13 +- .../dynamodb/api_op_UpdateTimeToLive.go | 43 +- .../aws-sdk-go-v2/service/dynamodb/auth.go | 8 +- 
.../service/dynamodb/deserializers.go | 150 + .../aws/aws-sdk-go-v2/service/dynamodb/doc.go | 35 +- .../service/dynamodb/endpoints.go | 19 +- .../service/dynamodb/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/service/dynamodb/options.go | 34 +- .../service/dynamodb/serializers.go | 106 + .../service/dynamodb/types/enums.go | 175 +- .../service/dynamodb/types/errors.go | 219 +- .../service/dynamodb/types/types.go | 1370 ++- .../service/dynamodb/validators.go | 4 +- .../service/dynamodbstreams/CHANGELOG.md | 54 + .../service/dynamodbstreams/api_client.go | 108 +- .../dynamodbstreams/api_op_DescribeStream.go | 21 +- .../dynamodbstreams/api_op_GetRecords.go | 25 +- .../api_op_GetShardIterator.go | 15 +- .../dynamodbstreams/api_op_ListStreams.go | 18 +- .../service/dynamodbstreams/auth.go | 8 +- .../service/dynamodbstreams/deserializers.go | 9 + .../service/dynamodbstreams/doc.go | 11 +- .../service/dynamodbstreams/endpoints.go | 19 +- .../dynamodbstreams/go_module_metadata.go | 2 +- .../service/dynamodbstreams/options.go | 34 +- .../service/dynamodbstreams/types/enums.go | 25 +- .../service/dynamodbstreams/types/errors.go | 32 +- .../service/dynamodbstreams/types/types.go | 173 +- .../internal/accept-encoding/CHANGELOG.md | 4 + .../accept-encoding/go_module_metadata.go | 2 +- .../internal/endpoint-discovery/CHANGELOG.md | 40 + .../endpoint-discovery/go_module_metadata.go | 2 +- .../internal/presigned-url/CHANGELOG.md | 40 + .../presigned-url/go_module_metadata.go | 2 +- .../service/kinesis/CHANGELOG.md | 54 + .../service/kinesis/api_client.go | 108 +- .../service/kinesis/api_op_AddTagsToStream.go | 23 +- .../service/kinesis/api_op_CreateStream.go | 74 +- .../api_op_DecreaseStreamRetentionPeriod.go | 24 +- .../kinesis/api_op_DeleteResourcePolicy.go | 9 + .../service/kinesis/api_op_DeleteStream.go | 42 +- .../api_op_DeregisterStreamConsumer.go | 22 +- .../service/kinesis/api_op_DescribeLimits.go | 19 +- .../service/kinesis/api_op_DescribeStream.go | 124 +- .../kinesis/api_op_DescribeStreamConsumer.go | 27 +- .../kinesis/api_op_DescribeStreamSummary.go | 24 +- .../api_op_DisableEnhancedMonitoring.go | 41 +- .../api_op_EnableEnhancedMonitoring.go | 38 +- .../service/kinesis/api_op_GetRecords.go | 121 +- .../kinesis/api_op_GetResourcePolicy.go | 9 + .../kinesis/api_op_GetShardIterator.go | 84 +- .../api_op_IncreaseStreamRetentionPeriod.go | 32 +- .../service/kinesis/api_op_ListShards.go | 117 +- .../kinesis/api_op_ListStreamConsumers.go | 78 +- .../service/kinesis/api_op_ListStreams.go | 39 +- .../kinesis/api_op_ListTagsForStream.go | 15 +- .../service/kinesis/api_op_MergeShards.go | 58 +- .../service/kinesis/api_op_PutRecord.go | 84 +- .../service/kinesis/api_op_PutRecords.go | 113 +- .../kinesis/api_op_PutResourcePolicy.go | 20 +- .../kinesis/api_op_RegisterStreamConsumer.go | 39 +- .../kinesis/api_op_RemoveTagsFromStream.go | 20 +- .../service/kinesis/api_op_SplitShard.go | 87 +- .../kinesis/api_op_StartStreamEncryption.go | 49 +- .../kinesis/api_op_StopStreamEncryption.go | 50 +- .../kinesis/api_op_SubscribeToShard.go | 49 +- .../kinesis/api_op_UpdateShardCount.go | 71 +- .../kinesis/api_op_UpdateStreamMode.go | 14 +- .../aws/aws-sdk-go-v2/service/kinesis/auth.go | 8 +- .../service/kinesis/deserializers.go | 9 + .../aws/aws-sdk-go-v2/service/kinesis/doc.go | 7 +- .../service/kinesis/endpoints.go | 19 +- .../service/kinesis/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/service/kinesis/options.go | 34 +- .../service/kinesis/types/enums.go | 40 +- .../service/kinesis/types/errors.go | 20 +- 
.../service/kinesis/types/types.go | 133 +- .../service/secretsmanager/CHANGELOG.md | 68 + .../service/secretsmanager/api_client.go | 108 +- .../api_op_BatchGetSecretValue.go | 73 +- .../api_op_CancelRotateSecret.go | 54 +- .../secretsmanager/api_op_CreateSecret.go | 209 +- .../api_op_DeleteResourcePolicy.go | 32 +- .../secretsmanager/api_op_DeleteSecret.go | 93 +- .../secretsmanager/api_op_DescribeSecret.go | 89 +- .../api_op_GetRandomPassword.go | 21 +- .../api_op_GetResourcePolicy.go | 40 +- .../secretsmanager/api_op_GetSecretValue.go | 98 +- .../api_op_ListSecretVersionIds.go | 76 +- .../secretsmanager/api_op_ListSecrets.go | 74 +- .../api_op_PutResourcePolicy.go | 67 +- .../secretsmanager/api_op_PutSecretValue.go | 170 +- .../api_op_RemoveRegionsFromReplication.go | 26 +- .../api_op_ReplicateSecretToRegions.go | 25 +- .../secretsmanager/api_op_RestoreSecret.go | 36 +- .../secretsmanager/api_op_RotateSecret.go | 119 +- .../api_op_StopReplicationToReplica.go | 31 +- .../secretsmanager/api_op_TagResource.go | 63 +- .../secretsmanager/api_op_UntagResource.go | 62 +- .../secretsmanager/api_op_UpdateSecret.go | 155 +- .../api_op_UpdateSecretVersionStage.go | 59 +- .../api_op_ValidateResourcePolicy.go | 44 +- .../service/secretsmanager/auth.go | 8 +- .../service/secretsmanager/deserializers.go | 9 + .../service/secretsmanager/doc.go | 52 +- .../service/secretsmanager/endpoints.go | 19 +- .../secretsmanager/go_module_metadata.go | 2 +- .../service/secretsmanager/options.go | 34 +- .../service/secretsmanager/serializers.go | 5 + .../service/secretsmanager/types/enums.go | 15 +- .../service/secretsmanager/types/errors.go | 19 +- .../service/secretsmanager/types/types.go | 111 +- .../aws-sdk-go-v2/service/sqs/CHANGELOG.md | 59 + .../aws-sdk-go-v2/service/sqs/api_client.go | 108 +- .../service/sqs/api_op_AddPermission.go | 70 +- .../sqs/api_op_CancelMessageMoveTask.go | 20 +- .../sqs/api_op_ChangeMessageVisibility.go | 86 +- .../api_op_ChangeMessageVisibilityBatch.go | 31 +- .../service/sqs/api_op_CreateQueue.go | 211 +- .../service/sqs/api_op_DeleteMessage.go | 37 +- .../service/sqs/api_op_DeleteMessageBatch.go | 28 +- .../service/sqs/api_op_DeleteQueue.go | 41 +- .../service/sqs/api_op_GetQueueAttributes.go | 149 +- .../service/sqs/api_op_GetQueueUrl.go | 31 +- .../sqs/api_op_ListDeadLetterSourceQueues.go | 53 +- .../sqs/api_op_ListMessageMoveTasks.go | 20 +- .../service/sqs/api_op_ListQueueTags.go | 16 +- .../service/sqs/api_op_ListQueues.go | 48 +- .../service/sqs/api_op_PurgeQueue.go | 30 +- .../service/sqs/api_op_ReceiveMessage.go | 230 +- .../service/sqs/api_op_RemovePermission.go | 21 +- .../service/sqs/api_op_SendMessage.go | 165 +- .../service/sqs/api_op_SendMessageBatch.go | 57 +- .../service/sqs/api_op_SetQueueAttributes.go | 146 +- .../sqs/api_op_StartMessageMoveTask.go | 19 +- .../service/sqs/api_op_TagQueue.go | 27 +- .../service/sqs/api_op_UntagQueue.go | 16 +- .../aws/aws-sdk-go-v2/service/sqs/auth.go | 8 +- .../service/sqs/deserializers.go | 10 + .../aws/aws-sdk-go-v2/service/sqs/doc.go | 53 +- .../aws-sdk-go-v2/service/sqs/endpoints.go | 19 +- .../service/sqs/go_module_metadata.go | 2 +- .../aws/aws-sdk-go-v2/service/sqs/options.go | 34 +- .../aws-sdk-go-v2/service/sqs/serializers.go | 18 + .../aws-sdk-go-v2/service/sqs/types/enums.go | 13 +- .../aws-sdk-go-v2/service/sqs/types/errors.go | 5 + .../aws-sdk-go-v2/service/sqs/types/types.go | 224 +- .../aws-sdk-go-v2/service/sso/CHANGELOG.md | 58 + .../aws-sdk-go-v2/service/sso/api_client.go | 108 +- 
.../service/sso/api_op_GetRoleCredentials.go | 13 +- .../service/sso/api_op_ListAccountRoles.go | 32 +- .../service/sso/api_op_ListAccounts.go | 37 +- .../service/sso/api_op_Logout.go | 37 +- .../aws/aws-sdk-go-v2/service/sso/auth.go | 8 +- .../service/sso/deserializers.go | 10 + .../aws/aws-sdk-go-v2/service/sso/doc.go | 22 +- .../aws-sdk-go-v2/service/sso/endpoints.go | 19 +- .../service/sso/go_module_metadata.go | 2 +- .../sso/internal/endpoints/endpoints.go | 8 + .../aws/aws-sdk-go-v2/service/sso/options.go | 34 +- .../aws-sdk-go-v2/service/sso/types/types.go | 20 +- .../service/ssooidc/CHANGELOG.md | 62 + .../service/ssooidc/api_client.go | 108 +- .../service/ssooidc/api_op_CreateToken.go | 62 +- .../ssooidc/api_op_CreateTokenWithIAM.go | 69 +- .../service/ssooidc/api_op_RegisterClient.go | 25 + .../api_op_StartDeviceAuthorization.go | 17 +- .../aws/aws-sdk-go-v2/service/ssooidc/auth.go | 8 +- .../service/ssooidc/deserializers.go | 101 + .../aws/aws-sdk-go-v2/service/ssooidc/doc.go | 40 +- .../service/ssooidc/endpoints.go | 19 +- .../service/ssooidc/go_module_metadata.go | 2 +- .../ssooidc/internal/endpoints/endpoints.go | 8 + .../aws-sdk-go-v2/service/ssooidc/options.go | 34 +- .../service/ssooidc/serializers.go | 56 + .../service/ssooidc/types/errors.go | 32 +- .../aws-sdk-go-v2/service/sts/CHANGELOG.md | 54 + .../aws-sdk-go-v2/service/sts/api_client.go | 108 +- .../service/sts/api_op_AssumeRole.go | 457 +- .../service/sts/api_op_AssumeRoleWithSAML.go | 373 +- .../sts/api_op_AssumeRoleWithWebIdentity.go | 387 +- .../sts/api_op_DecodeAuthorizationMessage.go | 51 +- .../service/sts/api_op_GetAccessKeyInfo.go | 55 +- .../service/sts/api_op_GetCallerIdentity.go | 32 +- .../service/sts/api_op_GetFederationToken.go | 315 +- .../service/sts/api_op_GetSessionToken.go | 110 +- .../aws/aws-sdk-go-v2/service/sts/auth.go | 8 +- .../service/sts/deserializers.go | 9 + .../aws/aws-sdk-go-v2/service/sts/doc.go | 12 +- .../aws-sdk-go-v2/service/sts/endpoints.go | 19 +- .../service/sts/go_module_metadata.go | 2 +- .../aws/aws-sdk-go-v2/service/sts/options.go | 34 +- .../aws-sdk-go-v2/service/sts/types/errors.go | 26 +- .../aws-sdk-go-v2/service/sts/types/types.go | 50 +- vendor/github.com/aws/smithy-go/CHANGELOG.md | 6 + .../aws/smithy-go/go_module_metadata.go | 2 +- .../ghinstallation/v2/appsTransport.go | 4 +- .../ghinstallation/v2/transport.go | 6 +- vendor/github.com/expr-lang/expr/README.md | 2 + vendor/github.com/expr-lang/expr/ast/print.go | 2 +- .../github.com/expr-lang/expr/ast/visitor.go | 3 + .../expr-lang/expr/builtin/builtin.go | 71 +- .../expr-lang/expr/builtin/utils.go | 13 +- .../expr-lang/expr/checker/checker.go | 43 +- .../expr-lang/expr/compiler/compiler.go | 73 +- vendor/github.com/expr-lang/expr/expr.go | 41 +- .../github.com/expr-lang/expr/file/error.go | 28 +- .../expr-lang/expr/file/location.go | 8 +- .../github.com/expr-lang/expr/file/source.go | 73 +- .../expr-lang/expr/parser/lexer/lexer.go | 91 +- .../expr-lang/expr/parser/lexer/state.go | 14 +- .../expr-lang/expr/parser/parser.go | 14 +- .../expr-lang/expr/patcher/with_timezone.go | 25 + .../github.com/expr-lang/expr/vm/program.go | 6 +- .../expr-lang/expr/vm/runtime/runtime.go | 6 +- vendor/github.com/expr-lang/expr/vm/vm.go | 21 +- vendor/github.com/go-logr/logr/README.md | 1 + vendor/github.com/go-logr/logr/funcr/funcr.go | 169 +- .../go-playground/validator/v10/README.md | 3 +- .../go-playground/validator/v10/baked_in.go | 210 +- .../go-playground/validator/v10/cache.go | 2 +- .../validator/v10/country_codes.go | 2305 
++--- .../validator/v10/currency_codes.go | 148 +- .../go-playground/validator/v10/doc.go | 29 +- .../validator/v10/postcode_regexes.go | 12 +- .../go-playground/validator/v10/regexes.go | 158 +- .../go-playground/validator/v10/util.go | 5 +- .../validator/v10/validator_instance.go | 8 +- .../go-task/slim-sprig/{ => v3}/.editorconfig | 0 .../slim-sprig/{ => v3}/.gitattributes | 0 .../go-task/slim-sprig/{ => v3}/.gitignore | 0 .../go-task/slim-sprig/{ => v3}/CHANGELOG.md | 19 + .../go-task/slim-sprig/{ => v3}/LICENSE.txt | 0 .../go-task/slim-sprig/{ => v3}/README.md | 2 +- .../go-task/slim-sprig/{ => v3}/Taskfile.yml | 2 +- .../go-task/slim-sprig/{ => v3}/crypto.go | 0 .../go-task/slim-sprig/{ => v3}/date.go | 0 .../go-task/slim-sprig/{ => v3}/defaults.go | 0 .../go-task/slim-sprig/{ => v3}/dict.go | 0 .../go-task/slim-sprig/{ => v3}/doc.go | 0 .../go-task/slim-sprig/{ => v3}/functions.go | 0 .../go-task/slim-sprig/{ => v3}/list.go | 0 .../go-task/slim-sprig/{ => v3}/network.go | 0 .../go-task/slim-sprig/{ => v3}/numeric.go | 0 .../go-task/slim-sprig/{ => v3}/reflect.go | 0 .../go-task/slim-sprig/{ => v3}/regex.go | 0 .../go-task/slim-sprig/{ => v3}/strings.go | 0 .../go-task/slim-sprig/{ => v3}/url.go | 0 .../google/go-github/{v60 => v62}/AUTHORS | 3 +- .../google/go-github/{v60 => v62}/LICENSE | 0 .../go-github/{v60 => v62}/github/actions.go | 0 .../{v60 => v62}/github/actions_artifacts.go | 0 .../{v60 => v62}/github/actions_cache.go | 0 .../{v60 => v62}/github/actions_oidc.go | 0 .../github/actions_permissions_enterprise.go | 0 .../github/actions_permissions_orgs.go | 0 .../github/actions_required_workflows.go | 0 .../github/actions_runner_groups.go | 0 .../{v60 => v62}/github/actions_runners.go | 10 +- .../{v60 => v62}/github/actions_secrets.go | 10 +- .../{v60 => v62}/github/actions_variables.go | 30 +- .../github/actions_workflow_jobs.go | 0 .../github/actions_workflow_runs.go | 0 .../{v60 => v62}/github/actions_workflows.go | 0 .../go-github/{v60 => v62}/github/activity.go | 0 .../{v60 => v62}/github/activity_events.go | 0 .../github/activity_notifications.go | 0 .../{v60 => v62}/github/activity_star.go | 0 .../{v60 => v62}/github/activity_watching.go | 0 .../go-github/{v60 => v62}/github/admin.go | 4 +- .../{v60 => v62}/github/admin_orgs.go | 6 +- .../{v60 => v62}/github/admin_stats.go | 2 +- .../{v60 => v62}/github/admin_users.go | 8 +- .../go-github/{v60 => v62}/github/apps.go | 39 + .../{v60 => v62}/github/apps_hooks.go | 0 .../github/apps_hooks_deliveries.go | 0 .../{v60 => v62}/github/apps_installation.go | 0 .../{v60 => v62}/github/apps_manifest.go | 0 .../{v60 => v62}/github/apps_marketplace.go | 0 .../{v60 => v62}/github/authorizations.go | 4 +- .../go-github/{v60 => v62}/github/billing.go | 0 .../go-github/{v60 => v62}/github/checks.go | 0 .../{v60 => v62}/github/code-scanning.go | 0 .../{v60 => v62}/github/codesofconduct.go | 0 .../{v60 => v62}/github/codespaces.go | 0 .../{v60 => v62}/github/codespaces_secrets.go | 0 .../go-github/{v60 => v62}/github/copilot.go | 20 +- .../{v60 => v62}/github/dependabot.go | 0 .../{v60 => v62}/github/dependabot_alerts.go | 0 .../{v60 => v62}/github/dependabot_secrets.go | 0 .../{v60 => v62}/github/dependency_graph.go | 0 .../v62/github/dependency_graph_snapshots.go | 113 + .../go-github/{v60 => v62}/github/doc.go | 2 +- .../go-github/{v60 => v62}/github/emojis.go | 0 .../{v60 => v62}/github/enterprise.go | 0 .../enterprise_actions_runner_groups.go | 0 .../github/enterprise_actions_runners.go | 0 .../github/enterprise_audit_log.go | 0 
.../enterprise_code_security_and_analysis.go | 0 .../go-github/{v60 => v62}/github/event.go | 0 .../{v60 => v62}/github/event_types.go | 29 +- .../go-github/{v60 => v62}/github/gists.go | 0 .../{v60 => v62}/github/gists_comments.go | 0 .../go-github/{v60 => v62}/github/git.go | 0 .../{v60 => v62}/github/git_blobs.go | 0 .../{v60 => v62}/github/git_commits.go | 0 .../go-github/{v60 => v62}/github/git_refs.go | 0 .../go-github/{v60 => v62}/github/git_tags.go | 0 .../{v60 => v62}/github/git_trees.go | 0 .../{v60 => v62}/github/github-accessors.go | 272 + .../go-github/{v60 => v62}/github/github.go | 97 +- .../{v60 => v62}/github/gitignore.go | 0 .../{v60 => v62}/github/interactions.go | 0 .../{v60 => v62}/github/interactions_orgs.go | 0 .../{v60 => v62}/github/interactions_repos.go | 0 .../{v60 => v62}/github/issue_import.go | 0 .../go-github/{v60 => v62}/github/issues.go | 0 .../{v60 => v62}/github/issues_assignees.go | 0 .../{v60 => v62}/github/issues_comments.go | 0 .../{v60 => v62}/github/issues_events.go | 0 .../{v60 => v62}/github/issues_labels.go | 0 .../{v60 => v62}/github/issues_milestones.go | 0 .../{v60 => v62}/github/issues_timeline.go | 0 .../go-github/{v60 => v62}/github/licenses.go | 0 .../go-github/{v60 => v62}/github/markdown.go | 0 .../go-github/{v60 => v62}/github/messages.go | 0 .../go-github/{v60 => v62}/github/meta.go | 14 +- .../{v60 => v62}/github/migrations.go | 0 .../github/migrations_source_import.go | 0 .../{v60 => v62}/github/migrations_user.go | 0 .../go-github/{v60 => v62}/github/orgs.go | 0 .../github/orgs_actions_allowed.go | 0 .../github/orgs_actions_permissions.go | 0 .../{v60 => v62}/github/orgs_audit_log.go | 0 .../github/orgs_credential_authorizations.go | 0 .../{v60 => v62}/github/orgs_custom_roles.go | 54 + .../{v60 => v62}/github/orgs_hooks.go | 0 .../github/orgs_hooks_configuration.go | 0 .../github/orgs_hooks_deliveries.go | 0 .../{v60 => v62}/github/orgs_members.go | 0 .../github/orgs_outside_collaborators.go | 0 .../{v60 => v62}/github/orgs_packages.go | 0 .../github/orgs_personal_access_tokens.go | 0 .../{v60 => v62}/github/orgs_projects.go | 0 .../{v60 => v62}/github/orgs_properties.go | 0 .../{v60 => v62}/github/orgs_rules.go | 0 .../github/orgs_security_managers.go | 0 .../github/orgs_users_blocking.go | 0 .../go-github/{v60 => v62}/github/packages.go | 0 .../go-github/{v60 => v62}/github/projects.go | 0 .../go-github/{v60 => v62}/github/pulls.go | 0 .../{v60 => v62}/github/pulls_comments.go | 0 .../{v60 => v62}/github/pulls_reviewers.go | 0 .../{v60 => v62}/github/pulls_reviews.go | 0 .../{v60 => v62}/github/pulls_threads.go | 0 .../{v60 => v62}/github/rate_limit.go | 24 +- .../{v60 => v62}/github/reactions.go | 0 .../go-github/{v60 => v62}/github/repos.go | 32 + .../github/repos_actions_access.go | 0 .../github/repos_actions_allowed.go | 0 .../github/repos_actions_permissions.go | 0 .../{v60 => v62}/github/repos_autolinks.go | 2 +- .../{v60 => v62}/github/repos_codeowners.go | 0 .../github/repos_collaborators.go | 0 .../{v60 => v62}/github/repos_comments.go | 0 .../{v60 => v62}/github/repos_commits.go | 0 .../github/repos_community_health.go | 0 .../{v60 => v62}/github/repos_contents.go | 0 .../repos_deployment_branch_policies.go | 0 .../repos_deployment_protection_rules.go | 0 .../{v60 => v62}/github/repos_deployments.go | 0 .../{v60 => v62}/github/repos_environments.go | 0 .../{v60 => v62}/github/repos_forks.go | 0 .../{v60 => v62}/github/repos_hooks.go | 0 .../github/repos_hooks_configuration.go | 0 .../github/repos_hooks_deliveries.go | 0 
.../{v60 => v62}/github/repos_invitations.go | 0 .../{v60 => v62}/github/repos_keys.go | 0 .../{v60 => v62}/github/repos_lfs.go | 0 .../{v60 => v62}/github/repos_merging.go | 0 .../{v60 => v62}/github/repos_pages.go | 0 .../github/repos_prereceive_hooks.go | 8 +- .../{v60 => v62}/github/repos_projects.go | 0 .../{v60 => v62}/github/repos_properties.go | 27 + .../{v60 => v62}/github/repos_releases.go | 0 .../{v60 => v62}/github/repos_rules.go | 21 +- .../{v60 => v62}/github/repos_stats.go | 0 .../{v60 => v62}/github/repos_statuses.go | 0 .../{v60 => v62}/github/repos_tags.go | 0 .../{v60 => v62}/github/repos_traffic.go | 0 .../go-github/{v60 => v62}/github/scim.go | 0 .../go-github/{v60 => v62}/github/search.go | 0 .../{v60 => v62}/github/secret_scanning.go | 0 .../github/security_advisories.go | 0 .../go-github/{v60 => v62}/github/strings.go | 0 .../go-github/{v60 => v62}/github/teams.go | 3 + .../github/teams_discussion_comments.go | 0 .../{v60 => v62}/github/teams_discussions.go | 0 .../{v60 => v62}/github/teams_members.go | 0 .../{v60 => v62}/github/timestamp.go | 0 .../go-github/{v60 => v62}/github/users.go | 0 .../github/users_administration.go | 8 +- .../{v60 => v62}/github/users_blocking.go | 0 .../{v60 => v62}/github/users_emails.go | 0 .../{v60 => v62}/github/users_followers.go | 0 .../{v60 => v62}/github/users_gpg_keys.go | 0 .../{v60 => v62}/github/users_keys.go | 0 .../{v60 => v62}/github/users_packages.go | 0 .../{v60 => v62}/github/users_projects.go | 0 .../github/users_ssh_signing_keys.go | 0 .../{v60 => v62}/github/with_appengine.go | 0 .../{v60 => v62}/github/without_appengine.go | 0 .../github.com/google/pprof/profile/encode.go | 3 + .../pprof/profile/legacy_java_profile.go | 4 +- .../github.com/google/pprof/profile/merge.go | 4 +- .../google/pprof/profile/profile.go | 16 +- .../proto/common_go_proto/common.pb.go | 99 +- .../s2a_context_go_proto/s2a_context.pb.go | 6 +- .../internal/proto/s2a_go_proto/s2a.pb.go | 28 +- .../proto/s2a_go_proto/s2a_grpc.pb.go | 13 +- .../proto/v2/common_go_proto/common.pb.go | 296 +- .../v2/s2a_context_go_proto/s2a_context.pb.go | 73 +- .../internal/proto/v2/s2a_go_proto/s2a.pb.go | 772 +- .../proto/v2/s2a_go_proto/s2a_grpc.pb.go | 13 +- .../google/s2a-go/internal/record/record.go | 34 +- .../internal/tokenmanager/tokenmanager.go | 15 +- .../google/s2a-go/internal/v2/s2av2.go | 40 +- .../v2/tlsconfigstore/tlsconfigstore.go | 15 +- vendor/github.com/google/s2a-go/s2a.go | 103 +- .../github.com/google/s2a-go/s2a_options.go | 28 +- .../gax-go/v2/.release-please-manifest.json | 2 +- .../googleapis/gax-go/v2/CHANGES.md | 14 + .../googleapis/gax-go/v2/apierror/apierror.go | 4 +- .../github.com/googleapis/gax-go/v2/header.go | 31 +- .../googleapis/gax-go/v2/internal/version.go | 2 +- .../gophercloud/gophercloud/CHANGELOG.md | 22 + .../gophercloud/gophercloud/params.go | 21 +- .../gophercloud/provider_client.go | 2 +- .../providers/prometheus/server_metrics.go | 3 +- .../v2/interceptors/callmeta.go | 66 + .../v2/interceptors/client.go | 23 +- .../v2/interceptors/reporter.go | 63 +- .../v2/interceptors/server.go | 25 +- .../grpc-gateway/v2/runtime/context.go | 17 +- .../grpc-gateway/v2/runtime/errors.go | 10 +- .../grpc-gateway/v2/runtime/handler.go | 46 +- .../grpc-gateway/v2/runtime/marshal_json.go | 5 + .../grpc-gateway/v2/runtime/marshal_jsonpb.go | 15 +- .../v2/runtime/marshaler_registry.go | 2 +- .../grpc-gateway/v2/runtime/pattern.go | 18 +- .../hashicorp/vault/api/lifetime_watcher.go | 9 +- vendor/github.com/jackc/pgx/v5/CHANGELOG.md | 19 + 
.../github.com/jackc/pgx/v5/CONTRIBUTING.md | 1 + vendor/github.com/jackc/pgx/v5/README.md | 2 +- vendor/github.com/jackc/pgx/v5/batch.go | 16 +- vendor/github.com/jackc/pgx/v5/conn.go | 4 +- vendor/github.com/jackc/pgx/v5/doc.go | 11 +- .../jackc/pgx/v5/extended_query_builder.go | 88 +- .../jackc/pgx/v5/internal/anynil/anynil.go | 36 - .../github.com/jackc/pgx/v5/large_objects.go | 7 +- vendor/github.com/jackc/pgx/v5/named_args.go | 58 +- .../github.com/jackc/pgx/v5/pgconn/config.go | 46 +- .../ctxwatch/context_watcher.go | 25 +- vendor/github.com/jackc/pgx/v5/pgconn/doc.go | 16 +- .../github.com/jackc/pgx/v5/pgconn/errors.go | 76 +- .../github.com/jackc/pgx/v5/pgconn/pgconn.go | 399 +- .../jackc/pgx/v5/pgproto3/pgproto3.go | 2 +- .../jackc/pgx/v5/pgtype/array_codec.go | 3 +- vendor/github.com/jackc/pgx/v5/pgtype/doc.go | 10 + .../jackc/pgx/v5/pgtype/interval.go | 33 +- vendor/github.com/jackc/pgx/v5/pgtype/json.go | 47 +- .../github.com/jackc/pgx/v5/pgtype/jsonb.go | 28 +- .../github.com/jackc/pgx/v5/pgtype/pgtype.go | 68 +- .../jackc/pgx/v5/pgtype/pgtype_default.go | 9 +- vendor/github.com/jackc/pgx/v5/pgtype/time.go | 43 +- .../jackc/pgx/v5/pgtype/timestamp.go | 39 +- .../jackc/pgx/v5/pgtype/timestamptz.go | 39 +- .../github.com/jackc/pgx/v5/pgxpool/conn.go | 4 + vendor/github.com/jackc/pgx/v5/pgxpool/doc.go | 2 +- .../github.com/jackc/pgx/v5/pgxpool/pool.go | 26 +- .../github.com/jackc/pgx/v5/pgxpool/tracer.go | 33 + vendor/github.com/jackc/pgx/v5/rows.go | 266 +- vendor/github.com/jackc/pgx/v5/stdlib/sql.go | 2 +- vendor/github.com/jackc/pgx/v5/values.go | 9 - .../github.com/klauspost/compress/README.md | 7 + .../compress/internal/snapref/encode_other.go | 2 +- .../klauspost/compress/s2/writer.go | 2 +- .../klauspost/compress/zstd/blockdec.go | 3 + .../klauspost/compress/zstd/blockenc.go | 20 + .../klauspost/compress/zstd/decoder.go | 2 +- .../klauspost/compress/zstd/enc_best.go | 12 + .../klauspost/compress/zstd/enc_better.go | 13 +- .../{ => v2}/LICENSE | 0 .../{ => v2}/NOTICE | 0 .../{ => v2}/pbutil/.gitignore | 0 .../{ => v2}/pbutil/Makefile | 0 .../{ => v2}/pbutil/decode.go | 16 +- .../{ => v2}/pbutil/doc.go | 0 .../{ => v2}/pbutil/encode.go | 5 +- vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md | 56 + .../github.com/onsi/ginkgo/v2/CONTRIBUTING.md | 10 +- vendor/github.com/onsi/ginkgo/v2/Makefile | 11 + .../ginkgo/v2/ginkgo/build/build_command.go | 15 +- .../v2/ginkgo/generators/bootstrap_command.go | 2 +- .../v2/ginkgo/generators/generate_command.go | 3 +- .../onsi/ginkgo/v2/ginkgo/internal/compile.go | 14 +- .../ginkgo/internal/profiles_and_reports.go | 2 + .../ginkgo/v2/ginkgo/watch/package_hash.go | 9 + .../onsi/ginkgo/v2/internal/suite.go | 7 +- .../ginkgo/v2/reporters/default_reporter.go | 16 +- .../onsi/ginkgo/v2/reporters/junit_report.go | 2 + vendor/github.com/onsi/ginkgo/v2/table_dsl.go | 8 +- .../github.com/onsi/ginkgo/v2/types/config.go | 18 +- .../onsi/ginkgo/v2/types/label_filter.go | 229 +- .../onsi/ginkgo/v2/types/version.go | 2 +- vendor/github.com/onsi/gomega/CHANGELOG.md | 30 + vendor/github.com/onsi/gomega/gomega_dsl.go | 6 +- .../gomega/matchers/have_exact_elements.go | 7 +- .../bipartitegraph/bipartitegraphmatching.go | 7 + .../prometheus/collectors/expvar_collector.go | 2 +- .../collectors/go_collector_latest.go | 9 +- .../client_golang/prometheus/counter.go | 26 +- .../client_golang/prometheus/desc.go | 28 +- .../prometheus/expvar_collector.go | 2 +- .../client_golang/prometheus/gauge.go | 8 +- .../client_golang/prometheus/histogram.go | 170 +- 
.../prometheus/internal/difflib.go | 2 +- .../client_golang/prometheus/labels.go | 60 +- .../client_golang/prometheus/metric.go | 3 + .../prometheus/process_collector_other.go | 4 +- .../prometheus/process_collector_wasip1.go | 26 + .../prometheus/promhttp/instrument_server.go | 9 +- .../client_golang/prometheus/registry.go | 6 +- .../client_golang/prometheus/summary.go | 41 +- .../prometheus/testutil/promlint/problem.go | 33 + .../prometheus/testutil/promlint/promlint.go | 308 +- .../testutil/promlint/validation.go | 33 + .../validations/counter_validations.go | 40 + .../validations/generic_name_validations.go | 101 + .../promlint/validations/help_validations.go | 32 + .../validations/histogram_validations.go | 63 + .../testutil/promlint/validations/units.go | 118 + .../prometheus/testutil/testutil.go | 15 + .../client_golang/prometheus/value.go | 55 +- .../client_golang/prometheus/vec.go | 106 +- .../prometheus/client_model/go/metrics.pb.go | 350 +- .../prometheus/common/config/http_config.go | 32 +- .../prometheus/common/expfmt/decode.go | 2 +- .../prometheus/common/expfmt/encode.go | 2 +- .../prometheus/procfs/.golangci.yml | 7 + .../prometheus/procfs/Makefile.common | 4 +- .../github.com/prometheus/procfs/buddyinfo.go | 4 +- vendor/github.com/prometheus/procfs/mdstat.go | 42 +- .../prometheus/procfs/mountstats.go | 4 +- vendor/github.com/prometheus/procfs/proc.go | 2 +- .../prometheus/procfs/proc_smaps.go | 2 +- .../rabbitmq/amqp091-go/CHANGELOG.md | 68 + .../github.com/rabbitmq/amqp091-go/channel.go | 56 +- .../rabbitmq/amqp091-go/connection.go | 79 +- vendor/github.com/rabbitmq/amqp091-go/doc.go | 33 +- .../github.com/rabbitmq/amqp091-go/types.go | 57 +- vendor/github.com/rabbitmq/amqp091-go/uri.go | 78 +- .../github.com/redis/go-redis/v9/CHANGELOG.md | 9 + vendor/github.com/redis/go-redis/v9/Makefile | 7 +- vendor/github.com/redis/go-redis/v9/README.md | 3 - .../redis/go-redis/v9/bitmap_commands.go | 26 +- .../github.com/redis/go-redis/v9/command.go | 12 +- vendor/github.com/redis/go-redis/v9/error.go | 3 + .../redis/go-redis/v9/hash_commands.go | 278 +- .../redis/go-redis/v9/internal/pool/conn.go | 22 + .../go-redis/v9/internal/pool/conn_check.go | 11 +- .../v9/internal/pool/conn_check_dummy.go | 4 +- .../redis/go-redis/v9/internal/pool/pool.go | 10 +- .../redis/go-redis/v9/internal/util.go | 17 + .../github.com/redis/go-redis/v9/options.go | 12 +- .../redis/go-redis/v9/osscluster.go | 76 +- vendor/github.com/redis/go-redis/v9/pubsub.go | 4 +- vendor/github.com/redis/go-redis/v9/redis.go | 11 +- .../redis/go-redis/v9/stream_commands.go | 14 +- .../github.com/redis/go-redis/v9/version.go | 2 +- vendor/github.com/sergi/go-diff/AUTHORS | 25 + vendor/github.com/sergi/go-diff/CONTRIBUTORS | 32 + vendor/github.com/sergi/go-diff/LICENSE | 20 + .../sergi/go-diff/diffmatchpatch/diff.go | 1352 +++ .../go-diff/diffmatchpatch/diffmatchpatch.go | 46 + .../sergi/go-diff/diffmatchpatch/match.go | 160 + .../sergi/go-diff/diffmatchpatch/mathutil.go | 23 + .../diffmatchpatch/operation_string.go | 17 + .../sergi/go-diff/diffmatchpatch/patch.go | 556 + .../go-diff/diffmatchpatch/stringutil.go | 106 + vendor/github.com/tidwall/gjson/README.md | 8 +- vendor/github.com/tidwall/gjson/SYNTAX.md | 8 +- vendor/github.com/tidwall/gjson/gjson.go | 19 +- vendor/github.com/tidwall/gjson/logo.png | Bin 15936 -> 0 bytes vendor/github.com/youmark/pkcs8/.travis.yml | 14 - .../go.etcd.io/etcd/api/v3/version/version.go | 2 +- .../etcd/client/pkg/v3/logutil/zap.go | 2 +- .../etcd/client/pkg/v3/transport/listener.go | 
50 +- vendor/go.etcd.io/etcd/client/v3/client.go | 2 +- .../etcd/client/v3/retry_interceptor.go | 20 +- vendor/go.etcd.io/etcd/client/v3/watch.go | 2 +- .../bson/bsoncodec/default_value_decoders.go | 4 +- .../mongo-driver/bson/bsoncodec/uint_codec.go | 8 +- .../bson/bsonrw/extjson_wrappers.go | 4 +- .../mongo-driver/bson/bsonrw/value_reader.go | 12 +- .../mongo-driver/bson/raw_value.go | 8 +- .../mongo-driver/bson/registry.go | 18 +- .../mongo-driver/internal/logger/io_sink.go | 7 +- .../mongo-driver/mongo/change_stream.go | 6 + .../mongo-driver/mongo/client.go | 4 - .../mongo-driver/mongo/collection.go | 14 +- .../mongo-driver/mongo/database.go | 2 +- .../mongo/options/clientoptions.go | 15 +- .../mongo/options/mongooptions.go | 2 +- .../mongo/options/searchindexoptions.go | 7 + .../mongo-driver/mongo/search_index_view.go | 43 +- .../mongo/writeconcern/writeconcern.go | 2 +- .../mongo-driver/version/version.go | 2 +- .../mongo-driver/x/bsonx/bsoncore/bsoncore.go | 40 +- .../mongo-driver/x/bsonx/bsoncore/doc.go | 23 +- .../mongo-driver/x/mongo/driver/DESIGN.md | 27 - .../x/mongo/driver/auth/creds/doc.go | 14 + .../mongo-driver/x/mongo/driver/auth/doc.go | 21 +- .../x/mongo/driver/compression.go | 21 +- .../x/mongo/driver/connstring/connstring.go | 7 + .../mongo-driver/x/mongo/driver/dns/dns.go | 7 + .../mongo-driver/x/mongo/driver/driver.go | 7 + .../mongocrypt/mongocrypt_not_enabled.go | 7 + .../x/mongo/driver/mongocrypt/options/doc.go | 14 + .../mongo-driver/x/mongo/driver/ocsp/ocsp.go | 7 + .../mongo-driver/x/mongo/driver/operation.go | 21 +- .../driver/operation/create_search_indexes.go | 42 +- .../x/mongo/driver/operation/doc.go | 14 + .../driver/operation/drop_search_index.go | 37 +- .../driver/operation/update_search_index.go | 39 +- .../x/mongo/driver/session/doc.go | 14 + .../x/mongo/driver/topology/connection.go | 4 +- .../x/mongo/driver/topology/rtt_monitor.go | 6 - .../x/mongo/driver/topology/server.go | 7 +- .../x/mongo/driver/topology/topology.go | 17 +- .../x/mongo/driver/wiremessage/wiremessage.go | 40 +- .../go.opentelemetry.io/otel/.codespellignore | 2 + vendor/go.opentelemetry.io/otel/.codespellrc | 2 +- vendor/go.opentelemetry.io/otel/.gitmodules | 3 - vendor/go.opentelemetry.io/otel/.golangci.yml | 6 + vendor/go.opentelemetry.io/otel/CHANGELOG.md | 98 +- vendor/go.opentelemetry.io/otel/CODEOWNERS | 4 +- .../go.opentelemetry.io/otel/CONTRIBUTING.md | 13 +- vendor/go.opentelemetry.io/otel/Makefile | 33 +- vendor/go.opentelemetry.io/otel/README.md | 14 +- vendor/go.opentelemetry.io/otel/RELEASING.md | 6 + .../otel/attribute/value.go | 18 +- .../otel/baggage/baggage.go | 10 +- .../otlp/otlpmetric/otlpmetricgrpc/README.md | 3 + .../otlp/otlpmetric/otlpmetricgrpc/client.go | 17 +- .../otlp/otlpmetric/otlpmetricgrpc/config.go | 15 +- .../otlp/otlpmetric/otlpmetricgrpc/doc.go | 19 +- .../otlpmetric/otlpmetricgrpc/exporter.go | 15 +- .../internal/envconfig/envconfig.go | 13 +- .../otlpmetric/otlpmetricgrpc/internal/gen.go | 13 +- .../internal/oconf/envconfig.go | 13 +- .../otlpmetricgrpc/internal/oconf/options.go | 27 +- .../internal/oconf/optiontypes.go | 13 +- .../otlpmetricgrpc/internal/oconf/tls.go | 13 +- .../otlpmetricgrpc/internal/partialsuccess.go | 13 +- .../otlpmetricgrpc/internal/retry/retry.go | 15 +- .../internal/transform/attribute.go | 13 +- .../internal/transform/error.go | 15 +- .../internal/transform/metricdata.go | 58 +- .../otlp/otlpmetric/otlpmetricgrpc/version.go | 15 +- .../otlp/otlpmetric/otlpmetrichttp/client.go | 5 +- 
.../otlpmetric/otlpmetrichttp/exporter.go | 2 +- .../otlpmetrichttp/internal/retry/retry.go | 2 +- .../internal/transform/error.go | 2 +- .../internal/transform/metricdata.go | 2 +- .../otlp/otlpmetric/otlpmetrichttp/version.go | 2 +- .../otel/internal/attribute/attribute.go | 24 +- .../otel/internal/global/instruments.go | 52 + .../otel/internal/global/meter.go | 23 + .../otel/internal/global/trace.go | 6 +- .../otel/metric/asyncfloat64.go | 6 +- vendor/go.opentelemetry.io/otel/metric/doc.go | 18 + .../otel/metric/embedded/embedded.go | 20 + .../otel/metric/instrument.go | 22 + .../go.opentelemetry.io/otel/metric/meter.go | 64 + .../otel/metric/noop/noop.go | 28 + .../otel/metric/syncfloat64.go | 60 +- .../otel/metric/syncint64.go | 54 +- vendor/go.opentelemetry.io/otel/renovate.json | 24 + .../go.opentelemetry.io/otel/requirements.txt | 2 +- .../otel/sdk/internal/env/env.go | 2 +- .../otel/sdk/internal/gen.go | 18 - .../otel/sdk/internal/internal.go | 17 - .../otel/sdk/internal/x/README.md | 46 + .../otel/sdk/internal/x/x.go | 66 + .../otel/sdk/metric/config.go | 2 +- .../otel/sdk/metric/exemplar.go | 94 +- .../otel/sdk/metric/instrument.go | 35 +- .../otel/sdk/metric/instrumentkind_string.go | 5 +- .../metric/internal/aggregate/aggregate.go | 35 +- .../sdk/metric/internal/aggregate/exemplar.go | 42 + .../aggregate/exponential_histogram.go | 14 +- .../metric/internal/aggregate/histogram.go | 16 +- .../metric/internal/aggregate/lastvalue.go | 112 +- .../otel/sdk/metric/internal/aggregate/sum.go | 22 +- .../otel/sdk/metric/internal/exemplar/drop.go | 10 +- .../sdk/metric/internal/exemplar/exemplar.go | 29 + .../sdk/metric/internal/exemplar/filter.go | 28 +- .../internal/exemplar/filtered_reservoir.go | 49 + .../otel/sdk/metric/internal/exemplar/hist.go | 23 +- .../otel/sdk/metric/internal/exemplar/rand.go | 35 +- .../sdk/metric/internal/exemplar/reservoir.go | 7 +- .../sdk/metric/internal/exemplar/storage.go | 23 +- .../sdk/metric/internal/exemplar/value.go | 57 + .../otel/sdk/metric/meter.go | 30 + .../otel/sdk/metric/periodic_reader.go | 2 +- .../otel/sdk/metric/pipeline.go | 16 +- .../otel/sdk/metric/reader.go | 4 +- .../otel/sdk/metric/version.go | 2 +- .../otel/sdk/metric/view.go | 2 +- .../otel/sdk/resource/builtin.go | 23 +- .../otel/sdk/resource/container.go | 2 +- .../otel/sdk/resource/env.go | 2 +- .../otel/sdk/resource/host_id.go | 2 +- .../otel/sdk/resource/os.go | 2 +- .../otel/sdk/resource/process.go | 2 +- .../otel/sdk/resource/resource.go | 11 +- .../otel/sdk/trace/batch_span_processor.go | 2 +- .../otel/sdk/trace/evictedqueue.go | 36 +- .../otel/sdk/trace/id_generator.go | 21 +- .../otel/sdk/trace/provider.go | 2 +- .../otel/sdk/trace/span.go | 75 +- .../otel/sdk/trace/tracer.go | 4 +- .../go.opentelemetry.io/otel/sdk/version.go | 2 +- .../otel/semconv/v1.24.0/README.md | 3 - .../otel/semconv/v1.24.0/attribute_group.go | 4387 -------- .../otel/semconv/v1.24.0/event.go | 200 - .../otel/semconv/v1.24.0/resource.go | 2545 ----- .../otel/semconv/v1.24.0/trace.go | 1323 --- .../otel/semconv/v1.26.0/README.md | 3 + .../otel/semconv/v1.26.0/attribute_group.go | 8996 +++++++++++++++++ .../otel/semconv/{v1.24.0 => v1.26.0}/doc.go | 4 +- .../semconv/{v1.24.0 => v1.26.0}/exception.go | 2 +- .../semconv/{v1.24.0 => v1.26.0}/metric.go | 466 +- .../semconv/{v1.24.0 => v1.26.0}/schema.go | 4 +- .../otel/trace/noop/noop.go | 4 +- vendor/go.opentelemetry.io/otel/version.go | 2 +- vendor/go.opentelemetry.io/otel/versions.yaml | 7 +- vendor/golang.org/x/crypto/cast5/cast5.go | 2 +- 
.../chacha20poly1305/chacha20poly1305.go | 2 +- .../x/crypto/cryptobyte/asn1/asn1.go | 2 +- .../golang.org/x/crypto/cryptobyte/string.go | 2 +- vendor/golang.org/x/crypto/hkdf/hkdf.go | 2 +- vendor/golang.org/x/crypto/md4/md4.go | 2 +- .../x/crypto/nacl/secretbox/secretbox.go | 2 +- vendor/golang.org/x/crypto/ocsp/ocsp.go | 2 +- vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go | 2 +- .../x/crypto/salsa20/salsa/hsalsa20.go | 2 +- vendor/golang.org/x/crypto/scrypt/scrypt.go | 2 +- vendor/golang.org/x/crypto/sha3/doc.go | 2 +- vendor/golang.org/x/crypto/sha3/hashes.go | 42 +- .../x/crypto/sha3/hashes_generic.go | 27 - .../golang.org/x/crypto/sha3/hashes_noasm.go | 23 + vendor/golang.org/x/crypto/sha3/register.go | 18 - vendor/golang.org/x/crypto/sha3/sha3.go | 62 +- vendor/golang.org/x/crypto/sha3/sha3_s390x.go | 67 +- vendor/golang.org/x/crypto/sha3/shake.go | 16 +- .../golang.org/x/crypto/sha3/shake_generic.go | 19 - .../golang.org/x/crypto/sha3/shake_noasm.go | 15 + vendor/golang.org/x/crypto/sha3/xor.go | 45 +- .../golang.org/x/crypto/sha3/xor_generic.go | 28 - .../golang.org/x/crypto/sha3/xor_unaligned.go | 66 - vendor/golang.org/x/exp/LICENSE | 4 +- vendor/golang.org/x/exp/slices/sort.go | 4 +- vendor/golang.org/x/mod/modfile/rule.go | 106 +- vendor/golang.org/x/mod/modfile/work.go | 52 +- vendor/golang.org/x/net/html/doc.go | 2 +- .../golang.org/x/net/http/httpguts/httplex.go | 13 +- vendor/golang.org/x/net/http2/frame.go | 13 +- vendor/golang.org/x/net/http2/http2.go | 19 +- vendor/golang.org/x/net/http2/server.go | 105 +- vendor/golang.org/x/net/http2/testsync.go | 331 - vendor/golang.org/x/net/http2/timer.go | 20 + vendor/golang.org/x/net/http2/transport.go | 329 +- .../x/net/http2/writesched_priority.go | 4 +- vendor/golang.org/x/net/proxy/per_host.go | 8 +- vendor/golang.org/x/net/websocket/hybi.go | 5 +- .../golang.org/x/net/websocket/websocket.go | 7 +- vendor/golang.org/x/oauth2/LICENSE | 4 +- vendor/golang.org/x/oauth2/google/google.go | 5 +- vendor/golang.org/x/sync/LICENSE | 4 +- vendor/golang.org/x/sys/unix/mremap.go | 5 + .../golang.org/x/sys/unix/syscall_darwin.go | 12 + vendor/golang.org/x/sys/unix/syscall_unix.go | 9 + .../x/sys/unix/zsyscall_darwin_amd64.go | 33 + .../x/sys/unix/zsyscall_darwin_amd64.s | 10 + .../x/sys/unix/zsyscall_darwin_arm64.go | 33 + .../x/sys/unix/zsyscall_darwin_arm64.s | 10 + .../x/sys/windows/security_windows.go | 24 +- .../x/sys/windows/zsyscall_windows.go | 9 + vendor/golang.org/x/text/message/message.go | 19 +- .../x/tools/go/ast/astutil/enclosing.go | 24 +- .../golang.org/x/tools/go/ast/astutil/util.go | 1 + .../tools/go/internal/packagesdriver/sizes.go | 53 - vendor/golang.org/x/tools/go/packages/doc.go | 8 - .../x/tools/go/packages/external.go | 22 +- .../golang.org/x/tools/go/packages/golist.go | 126 +- .../x/tools/go/packages/packages.go | 134 +- .../x/tools/go/types/objectpath/objectpath.go | 59 +- .../x/tools/internal/aliases/aliases.go | 12 +- .../x/tools/internal/aliases/aliases_go121.go | 15 +- .../x/tools/internal/aliases/aliases_go122.go | 69 +- .../x/tools/internal/event/tag/tag.go | 59 - .../x/tools/internal/gcimporter/iexport.go | 19 +- .../x/tools/internal/gcimporter/iimport.go | 4 +- .../tools/internal/gcimporter/ureader_yes.go | 4 +- .../x/tools/internal/gocommand/invoke.go | 123 +- .../x/tools/internal/gocommand/vendor.go | 54 + .../x/tools/internal/imports/fix.go | 395 +- .../x/tools/internal/imports/mod.go | 56 +- .../x/tools/internal/pkgbits/decoder.go | 4 + .../x/tools/internal/stdlib/manifest.go | 111 + 
.../tools/internal/typesinternal/errorcode.go | 4 +- .../x/tools/internal/typesinternal/types.go | 15 + .../x/tools/internal/versions/types_go122.go | 2 +- .../iamcredentials/v1/iamcredentials-gen.go | 32 +- .../google.golang.org/api/internal/creds.go | 42 +- .../api/internal/gensupport/resumable.go | 4 + .../api/internal/gensupport/retry.go | 34 +- .../internal/gensupport/retryable_linux.go | 16 - .../api/internal/gensupport/send.go | 24 +- .../api/internal/settings.go | 2 +- .../google.golang.org/api/internal/version.go | 2 +- .../api/storage/v1/storage-api.json | 150 +- .../api/storage/v1/storage-gen.go | 539 +- .../api/transport/grpc/dial.go | 18 +- .../api/transport/http/dial.go | 1 - .../api/annotations/annotations.pb.go | 4 +- .../googleapis/api/annotations/client.pb.go | 30 +- .../api/annotations/field_behavior.pb.go | 2 +- .../api/annotations/field_info.pb.go | 161 +- .../googleapis/api/annotations/http.pb.go | 52 +- .../googleapis/api/annotations/resource.pb.go | 13 +- .../googleapis/api/annotations/routing.pb.go | 4 +- .../api/distribution/distribution.pb.go | 4 +- .../api/expr/v1alpha1/checked.pb.go | 2 +- .../googleapis/api/expr/v1alpha1/eval.pb.go | 2 +- .../api/expr/v1alpha1/explain.pb.go | 2 +- .../googleapis/api/expr/v1alpha1/syntax.pb.go | 2 +- .../googleapis/api/expr/v1alpha1/value.pb.go | 2 +- .../googleapis/api/httpbody/httpbody.pb.go | 4 +- .../genproto/googleapis/api/label/label.pb.go | 4 +- .../googleapis/api/launch_stage.pb.go | 4 +- .../googleapis/api/metric/metric.pb.go | 4 +- .../api/monitoredres/monitored_resource.pb.go | 2 +- .../googleapis/cloud/location/locations.pb.go | 631 ++ .../type/calendarperiod/calendar_period.pb.go | 4 +- .../genproto/googleapis/type/date/date.pb.go | 4 +- .../genproto/googleapis/type/expr/expr.pb.go | 4 +- vendor/google.golang.org/grpc/CONTRIBUTING.md | 2 +- vendor/google.golang.org/grpc/MAINTAINERS.md | 1 + vendor/google.golang.org/grpc/Makefile | 7 +- vendor/google.golang.org/grpc/README.md | 2 +- .../grpclb/grpc_lb_v1/load_balancer.pb.go | 2 +- .../grpc_lb_v1/load_balancer_grpc.pb.go | 13 +- .../grpc/balancer/grpclb/grpclb_config.go | 4 +- .../grpc/balancer/grpclb/grpclb_picker.go | 6 +- .../{ => balancer/pickfirst}/pickfirst.go | 70 +- .../grpc/balancer/roundrobin/roundrobin.go | 4 +- .../grpc/balancer_wrapper.go | 4 + .../grpc_binarylog_v1/binarylog.pb.go | 2 +- vendor/google.golang.org/grpc/clientconn.go | 96 +- .../grpc/cmd/protoc-gen-go-grpc/README.md | 11 +- .../grpc/cmd/protoc-gen-go-grpc/grpc.go | 157 +- .../grpc/cmd/protoc-gen-go-grpc/main.go | 9 +- .../protoc-gen-go-grpc_test.sh | 50 + vendor/google.golang.org/grpc/codegen.sh | 17 - vendor/google.golang.org/grpc/codes/codes.go | 2 +- .../internal/proto/grpc_gcp/altscontext.pb.go | 2 +- .../internal/proto/grpc_gcp/handshaker.pb.go | 260 +- .../proto/grpc_gcp/handshaker_grpc.pb.go | 67 +- .../grpc_gcp/transport_security_common.pb.go | 2 +- .../grpc/credentials/credentials.go | 6 +- .../google.golang.org/grpc/credentials/tls.go | 34 +- vendor/google.golang.org/grpc/dialoptions.go | 82 +- .../grpc/health/grpc_health_v1/health.pb.go | 2 +- .../health/grpc_health_v1/health_grpc.pb.go | 24 +- .../grpc/internal/backoff/backoff.go | 4 +- .../balancer/gracefulswitch/config.go | 1 - .../balancer/gracefulswitch/gracefulswitch.go | 1 - .../grpc/internal/binarylog/method_logger.go | 6 +- .../grpc/internal/envconfig/envconfig.go | 9 +- .../grpc/internal/grpcrand/grpcrand.go | 100 - .../grpc/internal/grpcrand/grpcrand_go1.21.go | 73 - .../grpc/internal/grpcutil/compressor.go | 5 - 
.../grpc/internal/internal.go | 37 +- .../internal/resolver/dns/dns_resolver.go | 40 +- .../resolver/dns/internal/internal.go | 19 +- .../grpc/internal/transport/controlbuf.go | 33 +- .../grpc/internal/transport/http2_client.go | 68 +- .../grpc/internal/transport/http2_server.go | 16 +- .../grpc/internal/transport/transport.go | 2 +- vendor/google.golang.org/grpc/peer/peer.go | 30 + .../google.golang.org/grpc/picker_wrapper.go | 84 +- .../grpc/reflection/README.md | 18 + .../grpc/reflection/adapt.go | 57 + .../grpc_reflection_v1/reflection.pb.go | 953 ++ .../grpc_reflection_v1/reflection_grpc.pb.go | 165 + .../grpc_reflection_v1alpha/reflection.pb.go | 1028 ++ .../reflection_grpc.pb.go | 162 + .../grpc/reflection/internal/internal.go | 436 + .../grpc/reflection/serverreflection.go | 160 + vendor/google.golang.org/grpc/regenerate.sh | 6 +- .../grpc/resolver/dns/dns_resolver.go | 12 +- .../grpc/resolver_wrapper.go | 2 +- vendor/google.golang.org/grpc/rpc_util.go | 3 +- vendor/google.golang.org/grpc/server.go | 16 +- .../google.golang.org/grpc/service_config.go | 32 +- vendor/google.golang.org/grpc/stats/stats.go | 10 +- vendor/google.golang.org/grpc/stream.go | 5 +- .../grpc/stream_interfaces.go | 152 + vendor/google.golang.org/grpc/version.go | 2 +- vendor/google.golang.org/grpc/vet.sh | 195 - .../protoc-gen-go/internal_gengo/reflect.go | 6 +- .../internal_gengo/well_known_types.go | 40 +- .../protobuf/compiler/protogen/protogen.go | 2 +- .../protobuf/encoding/protojson/decode.go | 4 +- .../protobuf/encoding/prototext/decode.go | 4 +- .../protobuf/internal/encoding/json/decode.go | 2 +- .../protobuf/internal/encoding/text/decode.go | 2 +- .../protobuf/internal/errors/errors.go | 6 +- .../protobuf/internal/filedesc/desc.go | 4 + .../protobuf/internal/filedesc/desc_init.go | 2 +- .../protobuf/internal/filedesc/desc_lazy.go | 5 + .../protobuf/internal/filetype/build.go | 4 +- .../protobuf/internal/genid/descriptor_gen.go | 3 + .../protobuf/internal/impl/api_export.go | 6 +- .../protobuf/internal/impl/checkinit.go | 2 +- .../protobuf/internal/impl/codec_extension.go | 22 + .../internal/impl/codec_messageset.go | 22 + .../protobuf/internal/impl/convert.go | 2 +- .../protobuf/internal/impl/convert_list.go | 2 +- .../protobuf/internal/impl/convert_map.go | 2 +- .../protobuf/internal/impl/encode.go | 48 +- .../protobuf/internal/impl/extension.go | 8 +- .../protobuf/internal/impl/legacy_enum.go | 2 +- .../protobuf/internal/impl/legacy_message.go | 4 +- .../protobuf/internal/impl/message.go | 8 +- .../protobuf/internal/impl/message_reflect.go | 14 +- .../internal/impl/message_reflect_gen.go | 4 +- .../protobuf/internal/impl/pointer_reflect.go | 6 +- .../protobuf/internal/impl/pointer_unsafe.go | 4 +- .../protobuf/internal/msgfmt/format.go | 4 +- .../protobuf/internal/order/range.go | 4 +- .../protobuf/internal/version/version.go | 2 +- .../protobuf/proto/extension.go | 6 +- .../reflect/protodesc/desc_resolve.go | 5 + .../reflect/protodesc/desc_validate.go | 12 - .../reflect/protoreflect/source_gen.go | 2 + .../protobuf/reflect/protoreflect/type.go | 6 +- .../reflect/protoreflect/value_pure.go | 14 +- .../reflect/protoreflect/value_union.go | 14 +- .../protoreflect/value_unsafe_go120.go | 6 +- .../protoreflect/value_unsafe_go121.go | 8 +- .../reflect/protoregistry/registry.go | 14 +- .../types/descriptorpb/descriptor.pb.go | 995 +- .../protobuf/types/dynamicpb/dynamic.go | 16 +- .../types/gofeaturespb/go_features.pb.go | 44 +- .../protobuf/types/known/anypb/any.pb.go | 4 +- 
.../types/known/durationpb/duration.pb.go | 4 +- .../protobuf/types/known/emptypb/empty.pb.go | 4 +- .../types/known/fieldmaskpb/field_mask.pb.go | 4 +- .../types/known/structpb/struct.pb.go | 50 +- .../types/known/timestamppb/timestamp.pb.go | 4 +- .../types/known/wrapperspb/wrappers.pb.go | 20 +- .../protobuf/types/pluginpb/plugin.pb.go | 10 +- .../pkg/apis/apiextensions/types.go | 25 + .../pkg/apis/apiextensions/v1/conversion.go | 26 +- .../pkg/apis/apiextensions/v1/generated.pb.go | 679 +- .../pkg/apis/apiextensions/v1/generated.proto | 38 + .../pkg/apis/apiextensions/v1/types.go | 30 + .../apis/apiextensions/v1/types_jsonschema.go | 52 +- .../v1/zz_generated.conversion.go | 33 + .../apiextensions/v1/zz_generated.deepcopy.go | 21 + .../apiextensions/v1beta1/generated.pb.go | 742 +- .../apiextensions/v1beta1/generated.proto | 46 + .../pkg/apis/apiextensions/v1beta1/types.go | 38 + .../apiextensions/v1beta1/types_jsonschema.go | 52 +- .../v1beta1/zz_generated.conversion.go | 34 + .../v1beta1/zz_generated.deepcopy.go | 26 + .../apiextensions/zz_generated.deepcopy.go | 26 + .../v1/customresourcedefinitionversion.go | 14 + .../apiextensions/v1/selectablefield.go | 39 + .../v1beta1/customresourcedefinitionspec.go | 14 + .../customresourcedefinitionversion.go | 14 + .../apiextensions/v1beta1/selectablefield.go | 39 + .../pkg/storage/cacher/watch_cache.go | 10 +- .../pkg/storage/etcd3/metrics/metrics.go | 12 +- vendor/k8s.io/utils/trace/trace.go | 2 +- vendor/knative.dev/pkg/apis/condition_set.go | 6 +- vendor/knative.dev/pkg/apis/convert.go | 2 - vendor/knative.dev/pkg/apis/deprecated.go | 2 +- .../pkg/apis/duck/v1/addressable_types.go | 6 +- .../pkg/apis/duck/v1/auth_types.go | 98 + .../pkg/apis/duck/v1/destination.go | 4 +- .../pkg/apis/duck/v1/knative_reference.go | 1 - .../pkg/apis/duck/v1/kresource_type.go | 2 +- .../knative.dev/pkg/apis/duck/v1/register.go | 2 + .../pkg/apis/duck/v1/source_types.go | 2 +- .../pkg/apis/duck/v1/status_types.go | 1 - .../pkg/apis/duck/v1/zz_generated.deepcopy.go | 86 + vendor/knative.dev/pkg/apis/duck/verify.go | 6 +- vendor/knative.dev/pkg/apis/kind2resource.go | 5 +- vendor/knative.dev/pkg/kmeta/names.go | 3 +- vendor/knative.dev/pkg/ptr/doc.go | 18 + vendor/knative.dev/pkg/ptr/ptr.go | 67 + vendor/knative.dev/pkg/ptr/value.go | 91 + vendor/knative.dev/pkg/tracker/interface.go | 1 - vendor/modules.txt | 359 +- .../controller-runtime/pkg/cache/cache.go | 29 +- .../pkg/client/fieldowner.go | 106 + .../pkg/manager/internal.go | 18 + .../controller-runtime/pkg/manager/manager.go | 9 + .../pkg/metrics/server/server.go | 20 + .../tools/setup-envtest/README.md | 42 +- .../tools/setup-envtest/env/env.go | 42 +- .../tools/setup-envtest/main.go | 38 +- .../tools/setup-envtest/remote/client.go | 211 +- .../tools/setup-envtest/remote/gcs_client.go | 202 + .../tools/setup-envtest/remote/http_client.go | 214 + .../tools/setup-envtest/remote/read_body.go | 66 + .../tools/setup-envtest/store/store.go | 2 +- .../tools/setup-envtest/versions/parse.go | 2 - .../tools/setup-envtest/versions/platform.go | 51 +- .../controller-tools/pkg/crd/gen.go | 41 +- .../controller-tools/pkg/crd/markers/doc.go | 17 +- .../pkg/crd/markers/priority.go | 37 + .../pkg/crd/markers/register.go | 10 +- .../pkg/crd/markers/topology.go | 4 +- .../pkg/crd/markers/validation.go | 50 +- .../crd/markers/zz_generated.markerhelp.go | 106 +- .../controller-tools/pkg/crd/schema.go | 76 +- .../pkg/crd/zz_generated.markerhelp.go | 18 +- .../controller-tools/pkg/deepcopy/gen.go | 1 - 
.../pkg/deepcopy/zz_generated.markerhelp.go | 4 +- .../controller-tools/pkg/genall/options.go | 1 - .../pkg/genall/zz_generated.markerhelp.go | 12 +- .../controller-tools/pkg/loader/loader.go | 11 +- .../controller-tools/pkg/loader/refs.go | 3 +- .../controller-tools/pkg/markers/collect.go | 1 - .../controller-tools/pkg/rbac/parser.go | 1 - .../pkg/rbac/zz_generated.markerhelp.go | 8 +- .../controller-tools/pkg/schemapatcher/gen.go | 2 +- .../schemapatcher/zz_generated.markerhelp.go | 8 +- .../controller-tools/pkg/version/version.go | 2 + .../controller-tools/pkg/webhook/parser.go | 3 +- .../pkg/webhook/zz_generated.markerhelp.go | 48 +- .../accumulator/loadconfigfromcrds.go | 2 +- .../internal/builtins/SortOrderTransformer.go | 53 +- .../builtinpluginconsts/namereference.go | 7 + .../api/internal/localizer/localizer.go | 2 +- .../api/internal/localizer/locloader.go | 5 +- .../kustomize/api/internal/localizer/util.go | 10 +- .../builtinconfig/transformerconfig.go | 17 +- .../plugins/loader/load_go_plugin_disabled.go | 5 +- .../api/internal/target/kusttarget.go | 5 +- .../target/kusttarget_configplugin.go | 29 +- .../kustomize/api/resource/factory.go | 16 +- .../kustomize/api/types/kustomization.go | 1 + .../sigs.k8s.io/kustomize/api/types/labels.go | 4 +- .../cmd/config/internal/commands/grep.go | 3 + .../cmd/config/internal/commands/run-fns.go | 2 +- .../kustomize/v5/commands/create/create.go | 3 +- .../kustomize/v5/commands/edit/add/addbase.go | 4 +- .../v5/commands/edit/add/addbuildmetadata.go | 3 +- .../v5/commands/edit/add/addcomponent.go | 3 +- .../v5/commands/edit/add/addgenerator.go | 3 +- .../v5/commands/edit/add/addmetadata.go | 13 +- .../v5/commands/edit/add/addresource.go | 3 +- .../v5/commands/edit/add/addtransformer.go | 3 +- .../kustomize/v5/commands/edit/fix/convert.go | 25 +- .../edit/remove/removebuildmetadata.go | 4 +- .../commands/edit/remove/removeconfigmap.go | 3 +- .../v5/commands/edit/remove/removemetadata.go | 2 +- .../v5/commands/edit/remove/removeresource.go | 3 +- .../v5/commands/edit/remove/removesecret.go | 3 +- .../commands/edit/remove/removetransformer.go | 3 +- .../internal/kustfile/kustomizationfile.go | 12 +- .../v5/commands/internal/util/validate.go | 4 +- .../v5/commands/localize/localize.go | 66 +- .../kustomize/kyaml/copyutil/copyutil.go | 197 + .../kyaml/fn/runtime/container/container.go | 2 +- .../kustomize/kyaml/kio/ignorefilesmatcher.go | 2 +- .../kyaml/openapi/kustomizationapi/swagger.go | 2 +- .../kustomize/kyaml/openapi/openapi.go | 2 +- .../sigs.k8s.io/kustomize/kyaml/yaml/alias.go | 2 +- 1441 files changed, 53965 insertions(+), 29025 deletions(-) create mode 100644 vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go create mode 100644 vendor/cloud.google.com/go/compute/metadata/syscheck.go create mode 100644 vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go create mode 100644 vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go create mode 100644 vendor/cloud.google.com/go/internal/gen_info.sh rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/{zz_response_types.go => zz_responses.go} (99%) create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/accountid_endpoint_mode.go delete mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/path_value.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go create mode 100644 
vendor/github.com/expr-lang/expr/patcher/with_timezone.go rename vendor/github.com/go-task/slim-sprig/{ => v3}/.editorconfig (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/.gitattributes (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/.gitignore (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/CHANGELOG.md (95%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/LICENSE.txt (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/README.md (88%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/Taskfile.yml (89%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/crypto.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/date.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/defaults.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/dict.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/doc.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/functions.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/list.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/network.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/numeric.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/reflect.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/regex.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/strings.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/url.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/AUTHORS (99%) rename vendor/github.com/google/go-github/{v60 => v62}/LICENSE (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/actions.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/actions_artifacts.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/actions_cache.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/actions_oidc.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/actions_permissions_enterprise.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/actions_permissions_orgs.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/actions_required_workflows.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/actions_runner_groups.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/actions_runners.go (97%) rename vendor/github.com/google/go-github/{v60 => v62}/github/actions_secrets.go (96%) rename vendor/github.com/google/go-github/{v60 => v62}/github/actions_variables.go (89%) rename vendor/github.com/google/go-github/{v60 => v62}/github/actions_workflow_jobs.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/actions_workflow_runs.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/actions_workflows.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/activity.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/activity_events.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/activity_notifications.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/activity_star.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/activity_watching.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/admin.go (97%) rename vendor/github.com/google/go-github/{v60 => v62}/github/admin_orgs.go (93%) rename vendor/github.com/google/go-github/{v60 => 
v62}/github/admin_stats.go (98%) rename vendor/github.com/google/go-github/{v60 => v62}/github/admin_users.go (94%) rename vendor/github.com/google/go-github/{v60 => v62}/github/apps.go (90%) rename vendor/github.com/google/go-github/{v60 => v62}/github/apps_hooks.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/apps_hooks_deliveries.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/apps_installation.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/apps_manifest.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/apps_marketplace.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/authorizations.go (98%) rename vendor/github.com/google/go-github/{v60 => v62}/github/billing.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/checks.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/code-scanning.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/codesofconduct.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/codespaces.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/codespaces_secrets.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/copilot.go (93%) rename vendor/github.com/google/go-github/{v60 => v62}/github/dependabot.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/dependabot_alerts.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/dependabot_secrets.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/dependency_graph.go (100%) create mode 100644 vendor/github.com/google/go-github/v62/github/dependency_graph_snapshots.go rename vendor/github.com/google/go-github/{v60 => v62}/github/doc.go (99%) rename vendor/github.com/google/go-github/{v60 => v62}/github/emojis.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/enterprise.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/enterprise_actions_runner_groups.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/enterprise_actions_runners.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/enterprise_audit_log.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/enterprise_code_security_and_analysis.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/event.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/event_types.go (98%) rename vendor/github.com/google/go-github/{v60 => v62}/github/gists.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/gists_comments.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/git.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/git_blobs.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/git_commits.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/git_refs.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/git_tags.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/git_trees.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/github-accessors.go (98%) rename vendor/github.com/google/go-github/{v60 => v62}/github/github.go (95%) rename vendor/github.com/google/go-github/{v60 => v62}/github/gitignore.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/interactions.go (100%) rename 
vendor/github.com/google/go-github/{v60 => v62}/github/interactions_orgs.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/interactions_repos.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/issue_import.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/issues.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/issues_assignees.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/issues_comments.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/issues_events.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/issues_labels.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/issues_milestones.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/issues_timeline.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/licenses.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/markdown.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/messages.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/meta.go (88%) rename vendor/github.com/google/go-github/{v60 => v62}/github/migrations.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/migrations_source_import.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/migrations_user.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/orgs.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/orgs_actions_allowed.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/orgs_actions_permissions.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/orgs_audit_log.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/orgs_credential_authorizations.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/orgs_custom_roles.go (72%) rename vendor/github.com/google/go-github/{v60 => v62}/github/orgs_hooks.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/orgs_hooks_configuration.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/orgs_hooks_deliveries.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/orgs_members.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/orgs_outside_collaborators.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/orgs_packages.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/orgs_personal_access_tokens.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/orgs_projects.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/orgs_properties.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/orgs_rules.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/orgs_security_managers.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/orgs_users_blocking.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/packages.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/projects.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/pulls.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/pulls_comments.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/pulls_reviewers.go (100%) rename vendor/github.com/google/go-github/{v60 => 
v62}/github/pulls_reviews.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/pulls_threads.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/rate_limit.go (81%) rename vendor/github.com/google/go-github/{v60 => v62}/github/reactions.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/repos.go (98%) rename vendor/github.com/google/go-github/{v60 => v62}/github/repos_actions_access.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/repos_actions_allowed.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/repos_actions_permissions.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/repos_autolinks.go (99%) rename vendor/github.com/google/go-github/{v60 => v62}/github/repos_codeowners.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/repos_collaborators.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/repos_comments.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/repos_commits.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/repos_community_health.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/repos_contents.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/repos_deployment_branch_policies.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/repos_deployment_protection_rules.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/repos_deployments.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/repos_environments.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/repos_forks.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/repos_hooks.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/repos_hooks_configuration.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/repos_hooks_deliveries.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/repos_invitations.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/repos_keys.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/repos_lfs.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/repos_merging.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/repos_pages.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/repos_prereceive_hooks.go (93%) rename vendor/github.com/google/go-github/{v60 => v62}/github/repos_projects.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/repos_properties.go (53%) rename vendor/github.com/google/go-github/{v60 => v62}/github/repos_releases.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/repos_rules.go (95%) rename vendor/github.com/google/go-github/{v60 => v62}/github/repos_stats.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/repos_statuses.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/repos_tags.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/repos_traffic.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/scim.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/search.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/secret_scanning.go (100%) rename vendor/github.com/google/go-github/{v60 => 
v62}/github/security_advisories.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/strings.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/teams.go (99%) rename vendor/github.com/google/go-github/{v60 => v62}/github/teams_discussion_comments.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/teams_discussions.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/teams_members.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/timestamp.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/users.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/users_administration.go (91%) rename vendor/github.com/google/go-github/{v60 => v62}/github/users_blocking.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/users_emails.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/users_followers.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/users_gpg_keys.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/users_keys.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/users_packages.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/users_projects.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/users_ssh_signing_keys.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/with_appengine.go (100%) rename vendor/github.com/google/go-github/{v60 => v62}/github/without_appengine.go (100%) create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/callmeta.go delete mode 100644 vendor/github.com/jackc/pgx/v5/internal/anynil/anynil.go rename vendor/github.com/jackc/pgx/v5/pgconn/{internal => }/ctxwatch/context_watcher.go (71%) create mode 100644 vendor/github.com/jackc/pgx/v5/pgxpool/tracer.go rename vendor/github.com/matttproud/golang_protobuf_extensions/{ => v2}/LICENSE (100%) rename vendor/github.com/matttproud/golang_protobuf_extensions/{ => v2}/NOTICE (100%) rename vendor/github.com/matttproud/golang_protobuf_extensions/{ => v2}/pbutil/.gitignore (100%) rename vendor/github.com/matttproud/golang_protobuf_extensions/{ => v2}/pbutil/Makefile (100%) rename vendor/github.com/matttproud/golang_protobuf_extensions/{ => v2}/pbutil/decode.go (83%) rename vendor/github.com/matttproud/golang_protobuf_extensions/{ => v2}/pbutil/doc.go (100%) rename vendor/github.com/matttproud/golang_protobuf_extensions/{ => v2}/pbutil/encode.go (91%) create mode 100644 vendor/github.com/onsi/ginkgo/v2/Makefile create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/problem.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/counter_validations.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/help_validations.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/histogram_validations.go create mode 100644 
vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/units.go create mode 100644 vendor/github.com/sergi/go-diff/AUTHORS create mode 100644 vendor/github.com/sergi/go-diff/CONTRIBUTORS create mode 100644 vendor/github.com/sergi/go-diff/LICENSE create mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go create mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go create mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/match.go create mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/mathutil.go create mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/operation_string.go create mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go create mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go delete mode 100644 vendor/github.com/tidwall/gjson/logo.png delete mode 100644 vendor/github.com/youmark/pkcs8/.travis.yml delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/DESIGN.md create mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/doc.go create mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/doc.go create mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/doc.go create mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/doc.go delete mode 100644 vendor/go.opentelemetry.io/otel/.gitmodules create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/README.md create mode 100644 vendor/go.opentelemetry.io/otel/renovate.json delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/internal/gen.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/internal/internal.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md create mode 100644 vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go rename vendor/go.opentelemetry.io/otel/semconv/{v1.24.0 => v1.26.0}/doc.go (96%) rename vendor/go.opentelemetry.io/otel/semconv/{v1.24.0 => v1.26.0}/exception.go (98%) rename vendor/go.opentelemetry.io/otel/semconv/{v1.24.0 => v1.26.0}/metric.go (77%) rename vendor/go.opentelemetry.io/otel/semconv/{v1.24.0 => v1.26.0}/schema.go (85%) delete mode 100644 vendor/golang.org/x/crypto/sha3/hashes_generic.go create mode 100644 vendor/golang.org/x/crypto/sha3/hashes_noasm.go delete mode 100644 vendor/golang.org/x/crypto/sha3/register.go delete mode 100644 vendor/golang.org/x/crypto/sha3/shake_generic.go create mode 100644 vendor/golang.org/x/crypto/sha3/shake_noasm.go delete mode 100644 
vendor/golang.org/x/crypto/sha3/xor_generic.go delete mode 100644 vendor/golang.org/x/crypto/sha3/xor_unaligned.go delete mode 100644 vendor/golang.org/x/net/http2/testsync.go create mode 100644 vendor/golang.org/x/net/http2/timer.go delete mode 100644 vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go delete mode 100644 vendor/golang.org/x/tools/internal/event/tag/tag.go delete mode 100644 vendor/google.golang.org/api/internal/gensupport/retryable_linux.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/location/locations.pb.go rename vendor/google.golang.org/grpc/{ => balancer/pickfirst}/pickfirst.go (74%) create mode 100644 vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/protoc-gen-go-grpc_test.sh delete mode 100644 vendor/google.golang.org/grpc/codegen.sh delete mode 100644 vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go delete mode 100644 vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go create mode 100644 vendor/google.golang.org/grpc/reflection/README.md create mode 100644 vendor/google.golang.org/grpc/reflection/adapt.go create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go create mode 100644 vendor/google.golang.org/grpc/reflection/internal/internal.go create mode 100644 vendor/google.golang.org/grpc/reflection/serverreflection.go create mode 100644 vendor/google.golang.org/grpc/stream_interfaces.go delete mode 100644 vendor/google.golang.org/grpc/vet.sh create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/selectablefield.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/selectablefield.go create mode 100644 vendor/knative.dev/pkg/ptr/doc.go create mode 100644 vendor/knative.dev/pkg/ptr/ptr.go create mode 100644 vendor/knative.dev/pkg/ptr/value.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/fieldowner.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/remote/gcs_client.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/remote/http_client.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/remote/read_body.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/priority.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/copyutil/copyutil.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 311e05cd298..e2811f6afc2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,7 +15,7 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio ## History -- [Unreleased](#unreleased) +- [v2.15.1](#v2151) - [v2.15.0](#v2150) - [v2.14.1](#v2141) - [v2.14.0](#v2140) @@ -52,41 +52,16 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio - [v1.1.0](#v110) - [v1.0.0](#v100) -## Unreleased - -### New - -- TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX)) - -#### Experimental - -Here is an overview of all new **experimental** features: - -- TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX)) - -### Improvements - -- TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX)) +## v2.15.1 
### Fixes -- TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX)) - -### Deprecations - -You can find all deprecations in [this overview](https://github.com/kedacore/keda/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3Abreaking-change) and [join the discussion here](https://github.com/kedacore/keda/discussions/categories/deprecations). - -New deprecation(s): - -- TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX)) - -### Breaking Changes - -- TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX)) +- **General**: Hashicorp Vault PKI doesn't fail due to KeyPair mismatch ([#6028](https://github.com/kedacore/keda/issues/6028)) +- **JetStream**: Consumer leader change is correctly detected ([#6042](https://github.com/kedacore/keda/issues/6042)) ### Other -- TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX)) +- **General**: Bump deps and k8s deps to 0.29.7 ([#6035](https://github.com/kedacore/keda/pull/6035)) ## v2.15.0 diff --git a/config/crd/bases/eventing.keda.sh_cloudeventsources.yaml b/config/crd/bases/eventing.keda.sh_cloudeventsources.yaml index b44c59bcb66..3e1c957a6d2 100644 --- a/config/crd/bases/eventing.keda.sh_cloudeventsources.yaml +++ b/config/crd/bases/eventing.keda.sh_cloudeventsources.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.15.0 name: cloudeventsources.eventing.keda.sh spec: group: eventing.keda.sh diff --git a/config/crd/bases/keda.sh_clustertriggerauthentications.yaml b/config/crd/bases/keda.sh_clustertriggerauthentications.yaml index cd8db9c721e..db8dffadb28 100644 --- a/config/crd/bases/keda.sh_clustertriggerauthentications.yaml +++ b/config/crd/bases/keda.sh_clustertriggerauthentications.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.15.0 name: clustertriggerauthentications.keda.sh spec: group: keda.sh diff --git a/config/crd/bases/keda.sh_scaledjobs.yaml b/config/crd/bases/keda.sh_scaledjobs.yaml index 0cdc2c3bb3c..5ccf72f2d46 100644 --- a/config/crd/bases/keda.sh_scaledjobs.yaml +++ b/config/crd/bases/keda.sh_scaledjobs.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.15.0 name: scaledjobs.keda.sh spec: group: keda.sh diff --git a/config/crd/bases/keda.sh_scaledobjects.yaml b/config/crd/bases/keda.sh_scaledobjects.yaml index 8f03de32b18..c4ded761ccf 100644 --- a/config/crd/bases/keda.sh_scaledobjects.yaml +++ b/config/crd/bases/keda.sh_scaledobjects.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.15.0 name: scaledobjects.keda.sh spec: group: keda.sh diff --git a/config/crd/bases/keda.sh_triggerauthentications.yaml b/config/crd/bases/keda.sh_triggerauthentications.yaml index f6bb2f7bfe9..e8071ee6ff4 100644 --- a/config/crd/bases/keda.sh_triggerauthentications.yaml +++ b/config/crd/bases/keda.sh_triggerauthentications.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + 
controller-gen.kubebuilder.io/version: v0.15.0 name: triggerauthentications.keda.sh spec: group: keda.sh diff --git a/go.mod b/go.mod index 6f0f5867438..b6eff2b66cc 100644 --- a/go.mod +++ b/go.mod @@ -1,144 +1,141 @@ module github.com/kedacore/keda/v2 -go 1.22 +go 1.22.0 + +toolchain go1.22.5 require ( - cloud.google.com/go/compute/metadata v0.3.0 - cloud.google.com/go/monitoring v1.18.2 - cloud.google.com/go/secretmanager v1.12.0 - cloud.google.com/go/storage v1.40.0 + cloud.google.com/go/compute/metadata v0.5.0 + cloud.google.com/go/monitoring v1.20.3 + cloud.google.com/go/secretmanager v1.13.5 + cloud.google.com/go/storage v1.43.0 dario.cat/mergo v1.0.0 github.com/Azure/azure-amqp-common-go/v4 v4.2.0 - github.com/Azure/azure-kusto-go v0.15.2 - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 + github.com/Azure/azure-kusto-go v0.16.1 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.2.1 - github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.0 + github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.1 github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery v1.1.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub v1.2.0 github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets v1.1.0 - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0 github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue v1.0.0 github.com/Azure/go-autorest/autorest v0.11.29 - github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 + github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 github.com/DataDog/datadog-api-client-go v1.16.0 github.com/Huawei/gophercloud v1.0.21 - github.com/IBM/sarama v1.43.1 + github.com/IBM/sarama v1.43.2 github.com/arangodb/go-driver v1.6.2 github.com/aws/aws-msk-iam-sasl-signer-go v1.0.0 - github.com/aws/aws-sdk-go-v2 v1.26.1 - github.com/aws/aws-sdk-go-v2/config v1.27.11 - github.com/aws/aws-sdk-go-v2/credentials v1.17.11 - github.com/aws/aws-sdk-go-v2/service/amp v1.25.4 - github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.38.0 - github.com/aws/aws-sdk-go-v2/service/dynamodb v1.31.1 - github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.20.4 - github.com/aws/aws-sdk-go-v2/service/kinesis v1.27.4 - github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.28.6 - github.com/aws/aws-sdk-go-v2/service/sqs v1.31.4 - github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 - github.com/bradleyfalzon/ghinstallation/v2 v2.10.0 + github.com/aws/aws-sdk-go-v2 v1.30.3 + github.com/aws/aws-sdk-go-v2/config v1.27.27 + github.com/aws/aws-sdk-go-v2/credentials v1.17.27 + github.com/aws/aws-sdk-go-v2/service/amp v1.27.3 + github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.40.3 + github.com/aws/aws-sdk-go-v2/service/dynamodb v1.34.4 + github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.22.3 + github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.3 + github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.4 + github.com/aws/aws-sdk-go-v2/service/sqs v1.34.3 + github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 + github.com/bradleyfalzon/ghinstallation/v2 v2.11.0 github.com/cloudevents/sdk-go/v2 v2.15.2 github.com/denisenkom/go-mssqldb v0.12.3 github.com/dysnix/predictkube-libs v0.0.4-0.20230109175007-5a82fccd31c7 - github.com/dysnix/predictkube-proto v0.0.0-20220713123213-7135dce1e9c9 + 
github.com/dysnix/predictkube-proto v0.0.0-20240703070754-1483183ce065 github.com/elastic/go-elasticsearch/v7 v7.17.10 - github.com/expr-lang/expr v1.16.5 + github.com/expr-lang/expr v1.16.9 github.com/go-kivik/couchdb/v3 v3.4.1 github.com/go-kivik/kivik/v3 v3.2.4 - github.com/go-logr/logr v1.4.1 - github.com/go-playground/validator/v10 v10.19.0 + github.com/go-logr/logr v1.4.2 + github.com/go-playground/validator/v10 v10.22.0 github.com/go-sql-driver/mysql v1.8.1 github.com/gobwas/glob v0.2.3 github.com/gocql/gocql v1.6.0 github.com/google/go-cmp v0.6.0 github.com/google/go-github/v50 v50.2.0 github.com/google/uuid v1.6.0 - github.com/gophercloud/gophercloud v1.11.0 - github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0 - github.com/hashicorp/vault/api v1.13.0 + github.com/gophercloud/gophercloud v1.14.0 + github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1 + github.com/hashicorp/vault/api v1.14.0 github.com/influxdata/influxdb-client-go/v2 v2.13.0 - github.com/jackc/pgx/v5 v5.5.5 + github.com/jackc/pgx/v5 v5.6.0 github.com/joho/godotenv v1.5.1 github.com/jstemmer/go-junit-report/v2 v2.1.0 github.com/microsoft/ApplicationInsights-Go v0.4.4 github.com/microsoft/azure-devops-go-api/azuredevops v1.0.0-b5 github.com/mitchellh/hashstructure v1.1.0 github.com/newrelic/newrelic-client-go v1.1.0 - github.com/onsi/ginkgo/v2 v2.17.1 - github.com/onsi/gomega v1.33.0 - github.com/open-policy-agent/cert-controller v0.0.0-00010101000000-000000000000 + github.com/onsi/ginkgo/v2 v2.19.1 + github.com/onsi/gomega v1.34.1 + github.com/open-policy-agent/cert-controller v0.10.1 github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.19.0 + github.com/prometheus/client_golang v1.19.1 github.com/prometheus/client_model v0.6.1 - github.com/prometheus/common v0.53.0 - github.com/rabbitmq/amqp091-go v1.9.0 + github.com/prometheus/common v0.55.0 + github.com/rabbitmq/amqp091-go v1.10.0 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 - github.com/redis/go-redis/v9 v9.5.1 + github.com/redis/go-redis/v9 v9.6.1 github.com/robfig/cron/v3 v3.0.1 github.com/segmentio/kafka-go v0.4.47 github.com/segmentio/kafka-go/sasl/aws_msk_iam_v2 v0.1.0 github.com/spf13/cast v1.6.0 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.9.0 - github.com/tidwall/gjson v1.17.1 + github.com/tidwall/gjson v1.17.3 github.com/xhit/go-str2duration/v2 v2.1.0 - github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a - go.etcd.io/etcd/client/v3 v3.5.13 - go.mongodb.org/mongo-driver v1.15.0 - go.opentelemetry.io/otel v1.26.0 - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.26.0 - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.26.0 - go.opentelemetry.io/otel/metric v1.26.0 + github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 + go.etcd.io/etcd/client/v3 v3.5.15 + go.mongodb.org/mongo-driver v1.16.0 + go.opentelemetry.io/otel v1.28.0 + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0 + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.28.0 + go.opentelemetry.io/otel/metric v1.28.0 go.uber.org/mock v0.4.0 - golang.org/x/oauth2 v0.20.0 - golang.org/x/sync v0.7.0 - google.golang.org/api v0.181.0 - google.golang.org/grpc v1.64.0 - google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 - google.golang.org/protobuf v1.34.1 - k8s.io/api v0.29.4 - k8s.io/apimachinery v0.29.4 - k8s.io/apiserver v0.29.4 - k8s.io/client-go 
v0.29.4 - k8s.io/code-generator v0.29.4 - k8s.io/component-base v0.29.4 + golang.org/x/oauth2 v0.22.0 + golang.org/x/sync v0.8.0 + google.golang.org/api v0.190.0 + google.golang.org/grpc v1.65.0 + google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 + google.golang.org/protobuf v1.34.2 + k8s.io/api v0.30.0 + k8s.io/apimachinery v0.31.0-beta.0 + k8s.io/apiserver v0.30.0 + k8s.io/client-go v0.30.0 + k8s.io/code-generator v0.30.0 + k8s.io/component-base v0.30.0 k8s.io/klog/v2 v2.120.1 - k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 k8s.io/metrics v0.29.4 - k8s.io/utils v0.0.0-20240423183400-0849a56e8f22 - knative.dev/pkg v0.0.0-20240423132823-3c6badc82748 - sigs.k8s.io/controller-runtime v0.17.3 - sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240201105228-4000e996a202 - sigs.k8s.io/controller-tools v0.14.0 - sigs.k8s.io/custom-metrics-apiserver v1.28.1-0.20240425173932-1a855fe8c789 - sigs.k8s.io/kustomize/kustomize/v5 v5.4.1 -) - -require ( - filippo.io/edwards25519 v1.1.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 // indirect + k8s.io/utils v0.0.0-20240102154912-e7106e64919e + knative.dev/pkg v0.0.0-20240805063731-c88d5dad9653 + sigs.k8s.io/controller-runtime v0.17.5 + sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240804232438-89b5deec030c + sigs.k8s.io/controller-tools v0.15.0 + sigs.k8s.io/custom-metrics-apiserver v1.29.0 + sigs.k8s.io/kustomize/kustomize/v5 v5.4.3 ) // Remove this when they merge the PR and cut a release https://github.com/open-policy-agent/cert-controller/pull/202 replace github.com/open-policy-agent/cert-controller => github.com/jorturfer/cert-controller v0.0.0-20240427003941-363ba56751d7 replace ( - // pin k8s.io to v0.29.4 + // pin k8s.io to v0.29.7 & sigs.k8s.io/controller-runtime to v0.17.5 github.com/google/cel-go => github.com/google/cel-go v0.17.8 - github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.16.0 - github.com/prometheus/client_model => github.com/prometheus/client_model v0.4.0 - github.com/prometheus/common => github.com/prometheus/common v0.44.0 - k8s.io/api => k8s.io/api v0.29.4 - k8s.io/apimachinery => k8s.io/apimachinery v0.29.4 - k8s.io/apiserver => k8s.io/apiserver v0.29.4 - k8s.io/client-go => k8s.io/client-go v0.29.4 - k8s.io/code-generator => k8s.io/code-generator v0.29.4 - k8s.io/component-base => k8s.io/component-base v0.29.4 + github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.18.0 + github.com/prometheus/client_model => github.com/prometheus/client_model v0.5.0 + github.com/prometheus/common => github.com/prometheus/common v0.45.0 + k8s.io/api => k8s.io/api v0.29.7 + k8s.io/apimachinery => k8s.io/apimachinery v0.29.7 + k8s.io/apiserver => k8s.io/apiserver v0.29.7 + k8s.io/client-go => k8s.io/client-go v0.29.7 + k8s.io/code-generator => k8s.io/code-generator v0.29.7 + k8s.io/component-base => k8s.io/component-base v0.29.7 k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 - k8s.io/metrics => k8s.io/metrics v0.29.4 + k8s.io/metrics => k8s.io/metrics v0.29.7 ) replace ( @@ -150,32 +147,34 @@ replace ( github.com/golang-jwt/jwt/v4 => github.com/golang-jwt/jwt/v4 v4.5.0 // opentelemetry cannot update to 1.25.0 according to the dependencies of google.golang.org/grpc - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.24.0 + 
//go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.24.0 // https://avd.aquasec.com/nvd/2022/cve-2022-27191/ - golang.org/x/crypto => golang.org/x/crypto v0.22.0 + golang.org/x/crypto => golang.org/x/crypto v0.25.0 // Needed for CVE-2023-39325 https://nvd.nist.gov/vuln/detail/CVE-2023-39325 - golang.org/x/net => golang.org/x/net v0.24.0 + golang.org/x/net => golang.org/x/net v0.27.0 // https://nvd.nist.gov/vuln/detail/CVE-2022-32149 - golang.org/x/text => golang.org/x/text v0.14.0 + golang.org/x/text => golang.org/x/text v0.16.0 // https://github.com/kedacore/keda/issues/5124 - google.golang.org/grpc => google.golang.org/grpc v1.63.2 + google.golang.org/grpc => google.golang.org/grpc v1.65.0 // Needed for CVE-2022-28948 https://www.cve.org/CVERecord?id=CVE-2022-28948 gopkg.in/yaml.v3 => gopkg.in/yaml.v3 v3.0.1 ) require ( - cloud.google.com/go v0.113.0 // indirect - cloud.google.com/go/auth v0.4.1 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect - cloud.google.com/go/iam v1.1.7 // indirect + cloud.google.com/go v0.115.0 // indirect + cloud.google.com/go/auth v0.7.3 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect + cloud.google.com/go/iam v1.1.12 // indirect code.cloudfoundry.org/clock v1.1.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect + filippo.io/edwards25519 v1.1.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid v0.4.0 + github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 // indirect github.com/Azure/go-amqp v1.0.5 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect @@ -187,21 +186,21 @@ require ( github.com/NYTimes/gziphandler v1.1.1 // indirect github.com/ProtonMail/go-crypto v1.0.0 // indirect github.com/andybalholm/brotli v1.1.0 // indirect - github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect + github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230512164433-5d1fd1a340c9 // indirect github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect github.com/arangodb/go-velocypack v0.0.0-20200318135517-5af53c29c67e // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.6 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.20.5 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 // indirect - github.com/aws/smithy-go v1.20.2 // indirect + 
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.16 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.22.4 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 // indirect + github.com/aws/smithy-go v1.20.3 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/cenkalti/backoff/v3 v3.2.2 // indirect @@ -232,7 +231,7 @@ require ( github.com/go-openapi/swag v0.23.0 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect - github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gobuffalo/flect v1.0.2 // indirect github.com/gofrs/uuid v4.4.0+incompatible // indirect github.com/gogo/protobuf v1.3.2 // indirect @@ -246,19 +245,19 @@ require ( github.com/golang/snappy v0.0.4 // indirect github.com/google/cel-go v0.17.8 // indirect github.com/google/gnostic-models v0.6.8 // indirect - github.com/google/go-github/v60 v60.0.0 // indirect + github.com/google/go-github/v62 v62.0.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20231229205709-960ae82b1e42 // indirect - github.com/google/s2a-go v0.1.7 // indirect + github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af // indirect + github.com/google/s2a-go v0.1.8 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.4 // indirect + github.com/googleapis/gax-go/v2 v2.13.0 // indirect github.com/gorilla/websocket v1.5.1 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect - github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.3 // indirect + github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect @@ -285,14 +284,14 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.7 // indirect + github.com/klauspost/compress v1.17.8 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/leodido/go-urn v1.4.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moby/spdystream v0.2.0 // indirect @@ -308,10 +307,11 @@ require ( github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect 
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/procfs v0.14.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/rivo/uniseg v0.4.4 // indirect github.com/ryanuber/go-glob v1.0.0 // indirect github.com/samber/lo v1.39.0 // indirect + github.com/sergi/go-diff v1.2.0 // indirect github.com/shopspring/decimal v1.3.1 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/afero v1.11.0 // indirect @@ -330,49 +330,49 @@ require ( github.com/xdg-go/scram v1.1.2 github.com/xdg-go/stringprep v1.0.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect - go.etcd.io/etcd/api/v3 v3.5.13 // indirect - go.etcd.io/etcd/client/pkg/v3 v3.5.13 // indirect + go.etcd.io/etcd/api/v3 v3.5.15 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.15 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 // indirect - go.opentelemetry.io/otel/sdk v1.26.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.26.0 - go.opentelemetry.io/otel/trace v1.26.0 // indirect - go.opentelemetry.io/proto/otlp v1.2.0 // indirect + go.opentelemetry.io/otel/sdk v1.28.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.28.0 + go.opentelemetry.io/otel/trace v1.28.0 // indirect + go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.starlark.net v0.0.0-20231121155337-90ade8b19d09 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/automaxprocs v1.5.3 go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.24.0 - golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f - golang.org/x/mod v0.17.0 // indirect - golang.org/x/net v0.26.0 // indirect - golang.org/x/sys v0.21.0 // indirect - golang.org/x/term v0.19.0 // indirect + golang.org/x/crypto v0.25.0 + golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 + golang.org/x/mod v0.19.0 // indirect + golang.org/x/net v0.27.0 // indirect + golang.org/x/sys v0.22.0 // indirect + golang.org/x/term v0.22.0 // indirect golang.org/x/text v0.16.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.20.0 // indirect + golang.org/x/tools v0.23.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8 // indirect + google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 - k8s.io/apiextensions-apiserver v0.29.4 // indirect + k8s.io/apiextensions-apiserver v0.30.0 // indirect k8s.io/gengo v0.0.0-20240129211411-f967bbeff4b4 // indirect - k8s.io/kms v0.29.4 // indirect + k8s.io/kms v0.30.0 // indirect 
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/kustomize/api v0.17.1 // indirect - sigs.k8s.io/kustomize/cmd/config v0.14.0 // indirect - sigs.k8s.io/kustomize/kyaml v0.17.0 // indirect + sigs.k8s.io/kustomize/api v0.17.3 // indirect + sigs.k8s.io/kustomize/cmd/config v0.14.2 // indirect + sigs.k8s.io/kustomize/kyaml v0.17.2 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index b4733ecaa1f..8ecdbc874fd 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,4 @@ +cel.dev/expr v0.15.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= @@ -38,32 +39,18 @@ cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMz cloud.google.com/go v0.110.2/go.mod h1:k04UEeEtb6ZBRTv3dZz4CeJC3jKGxyhl0sAiVVquxiw= cloud.google.com/go v0.110.4/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= cloud.google.com/go v0.110.6/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= -cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= -cloud.google.com/go v0.110.8/go.mod h1:Iz8AkXJf1qmxC3Oxoep8R1T36w8B92yU29PcBhHO5fk= -cloud.google.com/go v0.110.9/go.mod h1:rpxevX/0Lqvlbc88b7Sc1SPNdyK1riNBTUU6JXhYNpM= -cloud.google.com/go v0.110.10/go.mod h1:v1OoFqYxiBkUrruItNM3eT4lLByNjxmJSV/xDKJNnic= -cloud.google.com/go v0.111.0/go.mod h1:0mibmpKP1TyOOFYQY5izo0LnT+ecvOQ0Sg3OdmMiNRU= -cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= -cloud.google.com/go v0.113.0 h1:g3C70mn3lWfckKBiCVsAshabrDg01pQ0pnX1MNtnMkA= -cloud.google.com/go v0.113.0/go.mod h1:glEqlogERKYeePz6ZdkcLJ28Q2I6aERgDDErBg9GzO8= +cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14= +cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU= cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68= -cloud.google.com/go/accessapproval v1.7.2/go.mod h1:/gShiq9/kK/h8T/eEn1BTzalDvk0mZxJlhfw0p+Xuc0= -cloud.google.com/go/accessapproval v1.7.3/go.mod h1:4l8+pwIxGTNqSf4T3ds8nLO94NQf0W/KnMNuQ9PbnP8= -cloud.google.com/go/accessapproval v1.7.4/go.mod h1:/aTEh45LzplQgFYdQdwPMR9YdX0UlhBmvB84uAmQKUc= -cloud.google.com/go/accessapproval v1.7.5/go.mod h1:g88i1ok5dvQ9XJsxpUInWWvUBrIZhyPDPbk4T01OoJ0= cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= cloud.google.com/go/accesscontextmanager v1.8.0/go.mod h1:uI+AI/r1oyWK99NN8cQ3UK76AMelMzgZCvJfsi2c+ps= cloud.google.com/go/accesscontextmanager v1.8.1/go.mod 
h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo= -cloud.google.com/go/accesscontextmanager v1.8.2/go.mod h1:E6/SCRM30elQJ2PKtFMs2YhfJpZSNcJyejhuzoId4Zk= -cloud.google.com/go/accesscontextmanager v1.8.3/go.mod h1:4i/JkF2JiFbhLnnpnfoTX5vRXfhf9ukhU1ANOTALTOQ= -cloud.google.com/go/accesscontextmanager v1.8.4/go.mod h1:ParU+WbMpD34s5JFEnGAnPBYAgUHozaTmDJU7aCU9+M= -cloud.google.com/go/accesscontextmanager v1.8.5/go.mod h1:TInEhcZ7V9jptGNqN3EzZ5XMhT6ijWxTGjzyETwmL0Q= cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= @@ -72,16 +59,6 @@ cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4Ahvj cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= cloud.google.com/go/aiplatform v1.45.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= -cloud.google.com/go/aiplatform v1.50.0/go.mod h1:IRc2b8XAMTa9ZmfJV1BCCQbieWWvDnP1A8znyz5N7y4= -cloud.google.com/go/aiplatform v1.51.0/go.mod h1:IRc2b8XAMTa9ZmfJV1BCCQbieWWvDnP1A8znyz5N7y4= -cloud.google.com/go/aiplatform v1.51.1/go.mod h1:kY3nIMAVQOK2XDqDPHaOuD9e+FdMA6OOpfBjsvaFSOo= -cloud.google.com/go/aiplatform v1.51.2/go.mod h1:hCqVYB3mY45w99TmetEoe8eCQEwZEp9WHxeZdcv9phw= -cloud.google.com/go/aiplatform v1.52.0/go.mod h1:pwZMGvqe0JRkI1GWSZCtnAfrR4K1bv65IHILGA//VEU= -cloud.google.com/go/aiplatform v1.54.0/go.mod h1:pwZMGvqe0JRkI1GWSZCtnAfrR4K1bv65IHILGA//VEU= -cloud.google.com/go/aiplatform v1.57.0/go.mod h1:pwZMGvqe0JRkI1GWSZCtnAfrR4K1bv65IHILGA//VEU= -cloud.google.com/go/aiplatform v1.58.0/go.mod h1:pwZMGvqe0JRkI1GWSZCtnAfrR4K1bv65IHILGA//VEU= -cloud.google.com/go/aiplatform v1.58.2/go.mod h1:c3kCiVmb6UC1dHAjZjcpDj6ZS0bHQ2slL88ZjC2LtlA= -cloud.google.com/go/aiplatform v1.60.0/go.mod h1:eTlGuHOahHprZw3Hio5VKmtThIOak5/qy6pzdsqcQnM= cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M= @@ -89,35 +66,18 @@ cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9R cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= cloud.google.com/go/analytics v0.21.2/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= -cloud.google.com/go/analytics v0.21.4/go.mod h1:zZgNCxLCy8b2rKKVfC1YkC2vTrpfZmeRCySM3aUbskA= -cloud.google.com/go/analytics v0.21.5/go.mod h1:BQtOBHWTlJ96axpPPnw5CvGJ6i3Ve/qX2fTxR8qWyr8= -cloud.google.com/go/analytics v0.21.6/go.mod h1:eiROFQKosh4hMaNhF85Oc9WO97Cpa7RggD40e/RBy8w= -cloud.google.com/go/analytics v0.22.0/go.mod h1:eiROFQKosh4hMaNhF85Oc9WO97Cpa7RggD40e/RBy8w= -cloud.google.com/go/analytics v0.23.0/go.mod h1:YPd7Bvik3WS95KBok2gPXDqQPHy08TsCQG6CdUCb+u0= cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= cloud.google.com/go/apigateway v1.6.1/go.mod 
h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA= -cloud.google.com/go/apigateway v1.6.2/go.mod h1:CwMC90nnZElorCW63P2pAYm25AtQrHfuOkbRSHj0bT8= -cloud.google.com/go/apigateway v1.6.3/go.mod h1:k68PXWpEs6BVDTtnLQAyG606Q3mz8pshItwPXjgv44Y= -cloud.google.com/go/apigateway v1.6.4/go.mod h1:0EpJlVGH5HwAN4VF4Iec8TAzGN1aQgbxAWGJsnPCGGY= -cloud.google.com/go/apigateway v1.6.5/go.mod h1:6wCwvYRckRQogyDDltpANi3zsCDl6kWi0b4Je+w2UiI= cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs= -cloud.google.com/go/apigeeconnect v1.6.2/go.mod h1:s6O0CgXT9RgAxlq3DLXvG8riw8PYYbU/v25jqP3Dy18= -cloud.google.com/go/apigeeconnect v1.6.3/go.mod h1:peG0HFQ0si2bN15M6QSjEW/W7Gy3NYkWGz7pFz13cbo= -cloud.google.com/go/apigeeconnect v1.6.4/go.mod h1:CapQCWZ8TCjnU0d7PobxhpOdVz/OVJ2Hr/Zcuu1xFx0= -cloud.google.com/go/apigeeconnect v1.6.5/go.mod h1:MEKm3AiT7s11PqTfKE3KZluZA9O91FNysvd3E6SJ6Ow= cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw= -cloud.google.com/go/apigeeregistry v0.7.2/go.mod h1:9CA2B2+TGsPKtfi3F7/1ncCCsL62NXBRfM6iPoGSM+8= -cloud.google.com/go/apigeeregistry v0.8.1/go.mod h1:MW4ig1N4JZQsXmBSwH4rwpgDonocz7FPBSw6XPGHmYw= -cloud.google.com/go/apigeeregistry v0.8.2/go.mod h1:h4v11TDGdeXJDJvImtgK2AFVvMIgGWjSb0HRnBSjcX8= -cloud.google.com/go/apigeeregistry v0.8.3/go.mod h1:aInOWnqF4yMQx8kTjDqHNXjZGh/mxeNlAf52YqtASUs= cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8= @@ -127,19 +87,11 @@ cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A= cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY= -cloud.google.com/go/appengine v1.8.2/go.mod h1:WMeJV9oZ51pvclqFN2PqHoGnys7rK0rz6s3Mp6yMvDo= -cloud.google.com/go/appengine v1.8.3/go.mod h1:2oUPZ1LVZ5EXi+AF1ihNAF+S8JrzQ3till5m9VQkrsk= -cloud.google.com/go/appengine v1.8.4/go.mod h1:TZ24v+wXBujtkK77CXCpjZbnuTvsFNT41MUaZ28D6vg= -cloud.google.com/go/appengine v1.8.5/go.mod h1:uHBgNoGLTS5di7BvU25NFDuKa82v0qQLjyMJLuPQrVo= cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY= cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg= -cloud.google.com/go/area120 v0.8.2/go.mod h1:a5qfo+x77SRLXnCynFWPUZhnZGeSgvQ+Y0v1kSItkh4= 
-cloud.google.com/go/area120 v0.8.3/go.mod h1:5zj6pMzVTH+SVHljdSKC35sriR/CVvQZzG/Icdyriw0= -cloud.google.com/go/area120 v0.8.4/go.mod h1:jfawXjxf29wyBXr48+W+GyX/f8fflxp642D/bb9v68M= -cloud.google.com/go/area120 v0.8.5/go.mod h1:BcoFCbDLZjsfe4EkCnEq1LKvHSK0Ew/zk5UFu6GMyA0= cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= @@ -149,11 +101,6 @@ cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9e cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI= cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E= -cloud.google.com/go/artifactregistry v1.14.2/go.mod h1:Xk+QbsKEb0ElmyeMfdHAey41B+qBq3q5R5f5xD4XT3U= -cloud.google.com/go/artifactregistry v1.14.3/go.mod h1:A2/E9GXnsyXl7GUvQ/2CjHA+mVRoWAXC0brg2os+kNI= -cloud.google.com/go/artifactregistry v1.14.4/go.mod h1:SJJcZTMv6ce0LDMUnihCN7WSrI+kBSFV0KIKo8S8aYU= -cloud.google.com/go/artifactregistry v1.14.6/go.mod h1:np9LSFotNWHcjnOgh8UVK0RFPCTUGbO0ve3384xyHfE= -cloud.google.com/go/artifactregistry v1.14.7/go.mod h1:0AUKhzWQzfmeTvT4SjfI4zjot72EMfrkvL9g9aRjnnM= cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= @@ -163,14 +110,6 @@ cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrd cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg= cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ= -cloud.google.com/go/asset v1.15.0/go.mod h1:tpKafV6mEut3+vN9ScGvCHXHj7FALFVta+okxFECHcg= -cloud.google.com/go/asset v1.15.1/go.mod h1:yX/amTvFWRpp5rcFq6XbCxzKT8RJUam1UoboE179jU4= -cloud.google.com/go/asset v1.15.2/go.mod h1:B6H5tclkXvXz7PD22qCA2TDxSVQfasa3iDlM89O2NXs= -cloud.google.com/go/asset v1.15.3/go.mod h1:yYLfUD4wL4X589A9tYrv4rFrba0QlDeag0CMcM5ggXU= -cloud.google.com/go/asset v1.16.0/go.mod h1:yYLfUD4wL4X589A9tYrv4rFrba0QlDeag0CMcM5ggXU= -cloud.google.com/go/asset v1.17.0/go.mod h1:yYLfUD4wL4X589A9tYrv4rFrba0QlDeag0CMcM5ggXU= -cloud.google.com/go/asset v1.17.1/go.mod h1:byvDw36UME5AzGNK7o4JnOnINkwOZ1yRrGrKIahHrng= -cloud.google.com/go/asset v1.17.2/go.mod h1:SVbzde67ehddSoKf5uebOD1sYw8Ab/jD/9EIeWg99q4= cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= @@ -178,54 +117,30 @@ cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEar cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= 
-cloud.google.com/go/assuredworkloads v1.11.2/go.mod h1:O1dfr+oZJMlE6mw0Bp0P1KZSlj5SghMBvTpZqIcUAW4= -cloud.google.com/go/assuredworkloads v1.11.3/go.mod h1:vEjfTKYyRUaIeA0bsGJceFV2JKpVRgyG2op3jfa59Zs= -cloud.google.com/go/assuredworkloads v1.11.4/go.mod h1:4pwwGNwy1RP0m+y12ef3Q/8PaiWrIDQ6nD2E8kvWI9U= -cloud.google.com/go/assuredworkloads v1.11.5/go.mod h1:FKJ3g3ZvkL2D7qtqIGnDufFkHxwIpNM9vtmhvt+6wqk= -cloud.google.com/go/auth v0.4.1 h1:Z7YNIhlWRtrnKlZke7z3GMqzvuYzdc2z98F9D1NV5Hg= -cloud.google.com/go/auth v0.4.1/go.mod h1:QVBuVEKpCn4Zp58hzRGvL0tjRGU0YqdRTdCHM1IHnro= -cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= -cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= +cloud.google.com/go/auth v0.7.3 h1:98Vr+5jMaCZ5NZk6e/uBgf60phTk/XN84r8QEWB9yjY= +cloud.google.com/go/auth v0.7.3/go.mod h1:HJtWUx1P5eqjy/f6Iq5KeytNpbAcGolPhOgyop2LlzA= +cloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI= +cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I= cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE= -cloud.google.com/go/automl v1.13.2/go.mod h1:gNY/fUmDEN40sP8amAX3MaXkxcqPIn7F1UIIPZpy4Mg= -cloud.google.com/go/automl v1.13.3/go.mod h1:Y8KwvyAZFOsMAPqUCfNu1AyclbC6ivCUF/MTwORymyY= -cloud.google.com/go/automl v1.13.4/go.mod h1:ULqwX/OLZ4hBVfKQaMtxMSTlPx0GqGbWN8uA/1EqCP8= -cloud.google.com/go/automl v1.13.5/go.mod h1:MDw3vLem3yh+SvmSgeYUmUKqyls6NzSumDm9OJ3xJ1Y= cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA= -cloud.google.com/go/baremetalsolution v1.2.0/go.mod h1:68wi9AwPYkEWIUT4SvSGS9UJwKzNpshjHsH4lzk8iOw= -cloud.google.com/go/baremetalsolution v1.2.1/go.mod h1:3qKpKIw12RPXStwQXcbhfxVj1dqQGEvcmA+SX/mUR88= -cloud.google.com/go/baremetalsolution v1.2.2/go.mod h1:O5V6Uu1vzVelYahKfwEWRMaS3AbCkeYHy3145s1FkhM= -cloud.google.com/go/baremetalsolution v1.2.3/go.mod h1:/UAQ5xG3faDdy180rCUv47e0jvpp3BFxT+Cl0PFjw5g= -cloud.google.com/go/baremetalsolution v1.2.4/go.mod h1:BHCmxgpevw9IEryE99HbYEfxXkAEA3hkMJbYYsHtIuY= cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A= -cloud.google.com/go/batch v1.4.1/go.mod h1:KdBmDD61K0ovcxoRHGrN6GmOBWeAOyCgKD0Mugx4Fkk= -cloud.google.com/go/batch v1.5.0/go.mod h1:KdBmDD61K0ovcxoRHGrN6GmOBWeAOyCgKD0Mugx4Fkk= -cloud.google.com/go/batch v1.5.1/go.mod h1:RpBuIYLkQu8+CWDk3dFD/t/jOCGuUpkpX+Y0n1Xccs8= 
-cloud.google.com/go/batch v1.6.1/go.mod h1:urdpD13zPe6YOK+6iZs/8/x2VBRofvblLpx0t57vM98= -cloud.google.com/go/batch v1.6.3/go.mod h1:J64gD4vsNSA2O5TtDB5AAux3nJ9iV8U3ilg3JDBYejU= -cloud.google.com/go/batch v1.7.0/go.mod h1:J64gD4vsNSA2O5TtDB5AAux3nJ9iV8U3ilg3JDBYejU= -cloud.google.com/go/batch v1.8.0/go.mod h1:k8V7f6VE2Suc0zUM4WtoibNrA6D3dqBpB+++e3vSGYc= cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= cloud.google.com/go/beyondcorp v0.6.1/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= -cloud.google.com/go/beyondcorp v1.0.1/go.mod h1:zl/rWWAFVeV+kx+X2Javly7o1EIQThU4WlkynffL/lk= -cloud.google.com/go/beyondcorp v1.0.2/go.mod h1:m8cpG7caD+5su+1eZr+TSvF6r21NdLJk4f9u4SP2Ntc= -cloud.google.com/go/beyondcorp v1.0.3/go.mod h1:HcBvnEd7eYr+HGDd5ZbuVmBYX019C6CEXBonXbCVwJo= -cloud.google.com/go/beyondcorp v1.0.4/go.mod h1:Gx8/Rk2MxrvWfn4WIhHIG1NV7IBfg14pTKv1+EArVcc= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -241,11 +156,6 @@ cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9 cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= cloud.google.com/go/bigquery v1.52.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= -cloud.google.com/go/bigquery v1.55.0/go.mod h1:9Y5I3PN9kQWuid6183JFhOGOW3GcirA5LpsKCUn+2ec= -cloud.google.com/go/bigquery v1.56.0/go.mod h1:KDcsploXTEY7XT3fDQzMUZlpQLHzE4itubHrnmhUrZA= -cloud.google.com/go/bigquery v1.57.1/go.mod h1:iYzC0tGVWt1jqSzBHqCr3lrRn0u13E8e+AqowBsDgug= -cloud.google.com/go/bigquery v1.58.0/go.mod h1:0eh4mWNY0KrBTjUzLjoYImapGORq9gEPT7MWjCy9lik= -cloud.google.com/go/bigquery v1.59.1/go.mod h1:VP1UJYgevyTwsV7desjzNzDND5p6hZB+Z8gZJN1GQUc= cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= @@ -253,44 +163,21 @@ cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOA cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA= -cloud.google.com/go/billing v1.17.0/go.mod h1:Z9+vZXEq+HwH7bhJkyI4OQcR6TSbeMrjlpEjO2vzY64= -cloud.google.com/go/billing v1.17.1/go.mod h1:Z9+vZXEq+HwH7bhJkyI4OQcR6TSbeMrjlpEjO2vzY64= -cloud.google.com/go/billing v1.17.2/go.mod h1:u/AdV/3wr3xoRBk5xvUzYMS1IawOAPwQMuHgHMdljDg= -cloud.google.com/go/billing v1.17.3/go.mod h1:z83AkoZ7mZwBGT3yTnt6rSGI1OOsHSIi6a5M3mJ8NaU= -cloud.google.com/go/billing v1.17.4/go.mod h1:5DOYQStCxquGprqfuid/7haD7th74kyMBHkjO/OvDtk= -cloud.google.com/go/billing 
v1.18.0/go.mod h1:5DOYQStCxquGprqfuid/7haD7th74kyMBHkjO/OvDtk= -cloud.google.com/go/billing v1.18.2/go.mod h1:PPIwVsOOQ7xzbADCwNe8nvK776QpfrOAUkvKjCUcpSE= cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U= -cloud.google.com/go/binaryauthorization v1.7.0/go.mod h1:Zn+S6QqTMn6odcMU1zDZCJxPjU2tZPV1oDl45lWY154= -cloud.google.com/go/binaryauthorization v1.7.1/go.mod h1:GTAyfRWYgcbsP3NJogpV3yeunbUIjx2T9xVeYovtURE= -cloud.google.com/go/binaryauthorization v1.7.2/go.mod h1:kFK5fQtxEp97m92ziy+hbu+uKocka1qRRL8MVJIgjv0= -cloud.google.com/go/binaryauthorization v1.7.3/go.mod h1:VQ/nUGRKhrStlGr+8GMS8f6/vznYLkdK5vaKfdCIpvU= -cloud.google.com/go/binaryauthorization v1.8.0/go.mod h1:VQ/nUGRKhrStlGr+8GMS8f6/vznYLkdK5vaKfdCIpvU= -cloud.google.com/go/binaryauthorization v1.8.1/go.mod h1:1HVRyBerREA/nhI7yLang4Zn7vfNVA3okoAR9qYQJAQ= cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI= -cloud.google.com/go/certificatemanager v1.7.2/go.mod h1:15SYTDQMd00kdoW0+XY5d9e+JbOPjp24AvF48D8BbcQ= -cloud.google.com/go/certificatemanager v1.7.3/go.mod h1:T/sZYuC30PTag0TLo28VedIRIj1KPGcOQzjWAptHa00= -cloud.google.com/go/certificatemanager v1.7.4/go.mod h1:FHAylPe/6IIKuaRmHbjbdLhGhVQ+CWHSD5Jq0k4+cCE= -cloud.google.com/go/certificatemanager v1.7.5/go.mod h1:uX+v7kWqy0Y3NG/ZhNvffh0kuqkKZIXdvlZRO7z0VtM= cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc= -cloud.google.com/go/channel v1.17.0/go.mod h1:RpbhJsGi/lXWAUM1eF4IbQGbsfVlg2o8Iiy2/YLfVT0= -cloud.google.com/go/channel v1.17.1/go.mod h1:xqfzcOZAcP4b/hUDH0GkGg1Sd5to6di1HOJn/pi5uBQ= -cloud.google.com/go/channel v1.17.2/go.mod h1:aT2LhnftnyfQceFql5I/mP8mIbiiJS4lWqgXA815zMk= -cloud.google.com/go/channel v1.17.3/go.mod h1:QcEBuZLGGrUMm7kNj9IbU1ZfmJq2apotsV83hbxX7eE= -cloud.google.com/go/channel v1.17.4/go.mod h1:QcEBuZLGGrUMm7kNj9IbU1ZfmJq2apotsV83hbxX7eE= -cloud.google.com/go/channel v1.17.5/go.mod h1:FlpaOSINDAXgEext0KMaBq/vwpLMkkPAw9b2mApQeHc= cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M= @@ -298,21 +185,10 @@ cloud.google.com/go/cloudbuild v1.7.0/go.mod 
h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8 cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= cloud.google.com/go/cloudbuild v1.10.1/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= -cloud.google.com/go/cloudbuild v1.14.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= -cloud.google.com/go/cloudbuild v1.14.1/go.mod h1:K7wGc/3zfvmYWOWwYTgF/d/UVJhS4pu+HAy7PL7mCsU= -cloud.google.com/go/cloudbuild v1.14.2/go.mod h1:Bn6RO0mBYk8Vlrt+8NLrru7WXlQ9/RDWz2uo5KG1/sg= -cloud.google.com/go/cloudbuild v1.14.3/go.mod h1:eIXYWmRt3UtggLnFGx4JvXcMj4kShhVzGndL1LwleEM= -cloud.google.com/go/cloudbuild v1.15.0/go.mod h1:eIXYWmRt3UtggLnFGx4JvXcMj4kShhVzGndL1LwleEM= -cloud.google.com/go/cloudbuild v1.15.1/go.mod h1:gIofXZSu+XD2Uy+qkOrGKEx45zd7s28u/k8f99qKals= cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI= -cloud.google.com/go/clouddms v1.7.0/go.mod h1:MW1dC6SOtI/tPNCciTsXtsGNEM0i0OccykPvv3hiYeM= -cloud.google.com/go/clouddms v1.7.1/go.mod h1:o4SR8U95+P7gZ/TX+YbJxehOCsM+fe6/brlrFquiszk= -cloud.google.com/go/clouddms v1.7.2/go.mod h1:Rk32TmWmHo64XqDvW7jgkFQet1tUKNVzs7oajtJT3jU= -cloud.google.com/go/clouddms v1.7.3/go.mod h1:fkN2HQQNUYInAU3NQ3vRLkV2iWs8lIdmBKOx4nrL6Hc= -cloud.google.com/go/clouddms v1.7.4/go.mod h1:RdrVqoFG9RWI5AvZ81SxJ/xvxPdtcRhFotwdE79DieY= cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= @@ -321,10 +197,6 @@ cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6 cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= cloud.google.com/go/cloudtasks v1.11.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= -cloud.google.com/go/cloudtasks v1.12.2/go.mod h1:A7nYkjNlW2gUoROg1kvJrQGhJP/38UaWwsnuBDOBVUk= -cloud.google.com/go/cloudtasks v1.12.3/go.mod h1:GPVXhIOSGEaR+3xT4Fp72ScI+HjHffSS4B8+BaBB5Ys= -cloud.google.com/go/cloudtasks v1.12.4/go.mod h1:BEPu0Gtt2dU6FxZHNqqNdGqIG86qyWKBPGnsb7udGY0= -cloud.google.com/go/cloudtasks v1.12.6/go.mod h1:b7c7fe4+TJsFZfDyzO51F7cjq7HLUlRi/KZQLQjDsaY= cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= @@ -341,29 +213,18 @@ cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/ cloud.google.com/go/compute v1.19.3/go.mod h1:qxvISKp/gYnXkSAD1ppcSOveRAmzxicEv/JlizULFrI= cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= -cloud.google.com/go/compute v1.23.1/go.mod h1:CqB3xpmPKKt3OJpW2ndFIXnA9A4xAy/F3Xp1ixncW78= -cloud.google.com/go/compute v1.23.2/go.mod 
h1:JJ0atRC0J/oWYiiVBmsSsrRnh92DhZPG4hFDcR04Rns= -cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= -cloud.google.com/go/compute v1.23.4/go.mod h1:/EJMj55asU6kAFnuZET8zqgwgJ9FvXWXOkkfQZa4ioI= -cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40= cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= cloud.google.com/go/contactcenterinsights v1.9.1/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= -cloud.google.com/go/contactcenterinsights v1.11.0/go.mod h1:hutBdImE4XNZ1NV4vbPJKSFOnQruhC5Lj9bZqWMTKiU= -cloud.google.com/go/contactcenterinsights v1.11.1/go.mod h1:FeNP3Kg8iteKM80lMwSk3zZZKVxr+PGnAId6soKuXwE= -cloud.google.com/go/contactcenterinsights v1.11.2/go.mod h1:A9PIR5ov5cRcd28KlDbmmXE8Aay+Gccer2h4wzkYFso= -cloud.google.com/go/contactcenterinsights v1.11.3/go.mod h1:HHX5wrz5LHVAwfI2smIotQG9x8Qd6gYilaHcLLLmNis= -cloud.google.com/go/contactcenterinsights v1.12.0/go.mod h1:HHX5wrz5LHVAwfI2smIotQG9x8Qd6gYilaHcLLLmNis= -cloud.google.com/go/contactcenterinsights v1.12.1/go.mod h1:HHX5wrz5LHVAwfI2smIotQG9x8Qd6gYilaHcLLLmNis= -cloud.google.com/go/contactcenterinsights v1.13.0/go.mod h1:ieq5d5EtHsu8vhe2y3amtZ+BE+AQwX5qAy7cpo0POsI= cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= @@ -371,24 +232,11 @@ cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= cloud.google.com/go/container v1.22.1/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= -cloud.google.com/go/container v1.26.0/go.mod h1:YJCmRet6+6jnYYRS000T6k0D0xUXQgBSaJ7VwI8FBj4= -cloud.google.com/go/container v1.26.1/go.mod h1:5smONjPRUxeEpDG7bMKWfDL4sauswqEtnBK1/KKpR04= -cloud.google.com/go/container v1.26.2/go.mod h1:YlO84xCt5xupVbLaMY4s3XNE79MUJ+49VmkInr6HvF4= -cloud.google.com/go/container v1.27.1/go.mod h1:b1A1gJeTBXVLQ6GGw9/9M4FG94BEGsqJ5+t4d/3N7O4= -cloud.google.com/go/container v1.28.0/go.mod h1:b1A1gJeTBXVLQ6GGw9/9M4FG94BEGsqJ5+t4d/3N7O4= -cloud.google.com/go/container v1.29.0/go.mod 
h1:b1A1gJeTBXVLQ6GGw9/9M4FG94BEGsqJ5+t4d/3N7O4= -cloud.google.com/go/container v1.30.1/go.mod h1:vkbfX0EnAKL/vgVECs5BZn24e1cJROzgszJirRKQ4Bg= -cloud.google.com/go/container v1.31.0/go.mod h1:7yABn5s3Iv3lmw7oMmyGbeV6tQj86njcTijkkGuvdZA= cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0= -cloud.google.com/go/containeranalysis v0.11.0/go.mod h1:4n2e99ZwpGxpNcz+YsFT1dfOHPQFGcAC8FN2M2/ne/U= -cloud.google.com/go/containeranalysis v0.11.1/go.mod h1:rYlUOM7nem1OJMKwE1SadufX0JP3wnXj844EtZAwWLY= -cloud.google.com/go/containeranalysis v0.11.2/go.mod h1:xibioGBC1MD2j4reTyV1xY1/MvKaz+fyM9ENWhmIeP8= -cloud.google.com/go/containeranalysis v0.11.3/go.mod h1:kMeST7yWFQMGjiG9K7Eov+fPNQcGhb8mXj/UcTiWw9U= -cloud.google.com/go/containeranalysis v0.11.4/go.mod h1:cVZT7rXYBS9NG1rhQbWL9pWbXCKHWJPYraE8/FTSYPE= cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= @@ -400,81 +248,38 @@ cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/l cloud.google.com/go/datacatalog v1.14.0/go.mod h1:h0PrGtlihoutNMp/uvwhawLQ9+c63Kz65UFqh49Yo+E= cloud.google.com/go/datacatalog v1.14.1/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= -cloud.google.com/go/datacatalog v1.17.1/go.mod h1:nCSYFHgtxh2MiEktWIz71s/X+7ds/UT9kp0PC7waCzE= -cloud.google.com/go/datacatalog v1.18.0/go.mod h1:nCSYFHgtxh2MiEktWIz71s/X+7ds/UT9kp0PC7waCzE= -cloud.google.com/go/datacatalog v1.18.1/go.mod h1:TzAWaz+ON1tkNr4MOcak8EBHX7wIRX/gZKM+yTVsv+A= -cloud.google.com/go/datacatalog v1.18.2/go.mod h1:SPVgWW2WEMuWHA+fHodYjmxPiMqcOiWfhc9OD5msigk= -cloud.google.com/go/datacatalog v1.18.3/go.mod h1:5FR6ZIF8RZrtml0VUao22FxhdjkoG+a0866rEnObryM= -cloud.google.com/go/datacatalog v1.19.0/go.mod h1:5FR6ZIF8RZrtml0VUao22FxhdjkoG+a0866rEnObryM= -cloud.google.com/go/datacatalog v1.19.2/go.mod h1:2YbODwmhpLM4lOFe3PuEhHK9EyTzQJ5AXgIy7EDKTEE= -cloud.google.com/go/datacatalog v1.19.3/go.mod h1:ra8V3UAsciBpJKQ+z9Whkxzxv7jmQg1hfODr3N3YPJ4= cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw= -cloud.google.com/go/dataflow v0.9.2/go.mod h1:vBfdBZ/ejlTaYIGB3zB4T08UshH70vbtZeMD+urnUSo= -cloud.google.com/go/dataflow v0.9.3/go.mod h1:HI4kMVjcHGTs3jTHW/kv3501YW+eloiJSLxkJa/vqFE= -cloud.google.com/go/dataflow v0.9.4/go.mod h1:4G8vAkHYCSzU8b/kmsoR2lWyHJD85oMJPHMtan40K8w= -cloud.google.com/go/dataflow v0.9.5/go.mod h1:udl6oi8pfUHnL0z6UN9Lf9chGqzDMVqcYTcZ1aPnCZQ= cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= cloud.google.com/go/dataform v0.4.0/go.mod 
h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M= -cloud.google.com/go/dataform v0.8.2/go.mod h1:X9RIqDs6NbGPLR80tnYoPNiO1w0wenKTb8PxxlhTMKM= -cloud.google.com/go/dataform v0.8.3/go.mod h1:8nI/tvv5Fso0drO3pEjtowz58lodx8MVkdV2q0aPlqg= -cloud.google.com/go/dataform v0.9.1/go.mod h1:pWTg+zGQ7i16pyn0bS1ruqIE91SdL2FDMvEYu/8oQxs= -cloud.google.com/go/dataform v0.9.2/go.mod h1:S8cQUwPNWXo7m/g3DhWHsLBoufRNn9EgFrMgne2j7cI= cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI= -cloud.google.com/go/datafusion v1.7.2/go.mod h1:62K2NEC6DRlpNmI43WHMWf9Vg/YvN6QVi8EVwifElI0= -cloud.google.com/go/datafusion v1.7.3/go.mod h1:eoLt1uFXKGBq48jy9LZ+Is8EAVLnmn50lNncLzwYokE= -cloud.google.com/go/datafusion v1.7.4/go.mod h1:BBs78WTOLYkT4GVZIXQCZT3GFpkpDN4aBY4NDX/jVlM= -cloud.google.com/go/datafusion v1.7.5/go.mod h1:bYH53Oa5UiqahfbNK9YuYKteeD4RbQSNMx7JF7peGHc= cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY= -cloud.google.com/go/datalabeling v0.8.2/go.mod h1:cyDvGHuJWu9U/cLDA7d8sb9a0tWLEletStu2sTmg3BE= -cloud.google.com/go/datalabeling v0.8.3/go.mod h1:tvPhpGyS/V7lqjmb3V0TaDdGvhzgR1JoW7G2bpi2UTI= -cloud.google.com/go/datalabeling v0.8.4/go.mod h1:Z1z3E6LHtffBGrNUkKwbwbDxTiXEApLzIgmymj8A3S8= -cloud.google.com/go/datalabeling v0.8.5/go.mod h1:IABB2lxQnkdUbMnQaOl2prCOfms20mcPxDBm36lps+s= cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= cloud.google.com/go/dataplex v1.8.1/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= -cloud.google.com/go/dataplex v1.9.1/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= -cloud.google.com/go/dataplex v1.10.1/go.mod h1:1MzmBv8FvjYfc7vDdxhnLFNskikkB+3vl475/XdCDhs= -cloud.google.com/go/dataplex v1.10.2/go.mod h1:xdC8URdTrCrZMW6keY779ZT1cTOfV8KEPNsw+LTRT1Y= -cloud.google.com/go/dataplex v1.11.1/go.mod h1:mHJYQQ2VEJHsyoC0OdNyy988DvEbPhqFs5OOLffLX0c= -cloud.google.com/go/dataplex v1.11.2/go.mod h1:mHJYQQ2VEJHsyoC0OdNyy988DvEbPhqFs5OOLffLX0c= -cloud.google.com/go/dataplex v1.13.0/go.mod h1:mHJYQQ2VEJHsyoC0OdNyy988DvEbPhqFs5OOLffLX0c= -cloud.google.com/go/dataplex v1.14.0/go.mod h1:mHJYQQ2VEJHsyoC0OdNyy988DvEbPhqFs5OOLffLX0c= -cloud.google.com/go/dataplex v1.14.1/go.mod 
h1:bWxQAbg6Smg+sca2+Ex7s8D9a5qU6xfXtwmq4BVReps= -cloud.google.com/go/dataplex v1.14.2/go.mod h1:0oGOSFlEKef1cQeAHXy4GZPB/Ife0fz/PxBf+ZymA2U= cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4= -cloud.google.com/go/dataproc/v2 v2.2.0/go.mod h1:lZR7AQtwZPvmINx5J87DSOOpTfof9LVZju6/Qo4lmcY= -cloud.google.com/go/dataproc/v2 v2.2.1/go.mod h1:QdAJLaBjh+l4PVlVZcmrmhGccosY/omC1qwfQ61Zv/o= -cloud.google.com/go/dataproc/v2 v2.2.2/go.mod h1:aocQywVmQVF4i8CL740rNI/ZRpsaaC1Wh2++BJ7HEJ4= -cloud.google.com/go/dataproc/v2 v2.2.3/go.mod h1:G5R6GBc9r36SXv/RtZIVfB8SipI+xVn0bX5SxUzVYbY= -cloud.google.com/go/dataproc/v2 v2.3.0/go.mod h1:G5R6GBc9r36SXv/RtZIVfB8SipI+xVn0bX5SxUzVYbY= -cloud.google.com/go/dataproc/v2 v2.4.0/go.mod h1:3B1Ht2aRB8VZIteGxQS/iNSJGzt9+CA0WGnDVMEm7Z4= cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8= -cloud.google.com/go/dataqna v0.8.2/go.mod h1:KNEqgx8TTmUipnQsScOoDpq/VlXVptUqVMZnt30WAPs= -cloud.google.com/go/dataqna v0.8.3/go.mod h1:wXNBW2uvc9e7Gl5k8adyAMnLush1KVV6lZUhB+rqNu4= -cloud.google.com/go/dataqna v0.8.4/go.mod h1:mySRKjKg5Lz784P6sCov3p1QD+RZQONRMRjzGNcFd0c= -cloud.google.com/go/dataqna v0.8.5/go.mod h1:vgihg1mz6n7pb5q2YJF7KlXve6tCglInd6XO0JGOlWM= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= @@ -482,8 +287,6 @@ cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nw cloud.google.com/go/datastore v1.12.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= cloud.google.com/go/datastore v1.12.1/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= -cloud.google.com/go/datastore v1.14.0/go.mod h1:GAeStMBIt9bPS7jMJA85kgkpsMkvseWWXiaHya9Jes8= -cloud.google.com/go/datastore v1.15.0/go.mod h1:GAeStMBIt9bPS7jMJA85kgkpsMkvseWWXiaHya9Jes8= cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= @@ -492,23 +295,12 @@ cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZ cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= cloud.google.com/go/datastream v1.9.1/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= -cloud.google.com/go/datastream v1.10.1/go.mod h1:7ngSYwnw95YFyTd5tOGBxHlOZiL+OtpjheqU7t2/s/c= -cloud.google.com/go/datastream v1.10.2/go.mod h1:W42TFgKAs/om6x/CdXX5E4oiAsKlH+e8MTGy81zdYt0= -cloud.google.com/go/datastream v1.10.3/go.mod 
h1:YR0USzgjhqA/Id0Ycu1VvZe8hEWwrkjuXrGbzeDOSEA= -cloud.google.com/go/datastream v1.10.4/go.mod h1:7kRxPdxZxhPg3MFeCSulmAJnil8NJGGvSNdn4p1sRZo= cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= cloud.google.com/go/deploy v1.11.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= -cloud.google.com/go/deploy v1.13.1/go.mod h1:8jeadyLkH9qu9xgO3hVWw8jVr29N1mnW42gRJT8GY6g= -cloud.google.com/go/deploy v1.14.1/go.mod h1:N8S0b+aIHSEeSr5ORVoC0+/mOPUysVt8ae4QkZYolAw= -cloud.google.com/go/deploy v1.14.2/go.mod h1:e5XOUI5D+YGldyLNZ21wbp9S8otJbBE4i88PtO9x/2g= -cloud.google.com/go/deploy v1.15.0/go.mod h1:e5XOUI5D+YGldyLNZ21wbp9S8otJbBE4i88PtO9x/2g= -cloud.google.com/go/deploy v1.16.0/go.mod h1:e5XOUI5D+YGldyLNZ21wbp9S8otJbBE4i88PtO9x/2g= -cloud.google.com/go/deploy v1.17.0/go.mod h1:XBr42U5jIr64t92gcpOXxNrqL2PStQCXHuKK5GRUuYo= -cloud.google.com/go/deploy v1.17.1/go.mod h1:SXQyfsXrk0fBmgBHRzBjQbZhMfKZ3hMQBw5ym7MN/50= cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= @@ -519,24 +311,10 @@ cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgp cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= cloud.google.com/go/dialogflow v1.38.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= -cloud.google.com/go/dialogflow v1.43.0/go.mod h1:pDUJdi4elL0MFmt1REMvFkdsUTYSHq+rTCS8wg0S3+M= -cloud.google.com/go/dialogflow v1.44.0/go.mod h1:pDUJdi4elL0MFmt1REMvFkdsUTYSHq+rTCS8wg0S3+M= -cloud.google.com/go/dialogflow v1.44.1/go.mod h1:n/h+/N2ouKOO+rbe/ZnI186xImpqvCVj2DdsWS/0EAk= -cloud.google.com/go/dialogflow v1.44.2/go.mod h1:QzFYndeJhpVPElnFkUXxdlptx0wPnBWLCBT9BvtC3/c= -cloud.google.com/go/dialogflow v1.44.3/go.mod h1:mHly4vU7cPXVweuB5R0zsYKPMzy240aQdAu06SqBbAQ= -cloud.google.com/go/dialogflow v1.47.0/go.mod h1:mHly4vU7cPXVweuB5R0zsYKPMzy240aQdAu06SqBbAQ= -cloud.google.com/go/dialogflow v1.48.0/go.mod h1:mHly4vU7cPXVweuB5R0zsYKPMzy240aQdAu06SqBbAQ= -cloud.google.com/go/dialogflow v1.48.1/go.mod h1:C1sjs2/g9cEwjCltkKeYp3FFpz8BOzNondEaAlCpt+A= -cloud.google.com/go/dialogflow v1.48.2/go.mod h1:7A2oDf6JJ1/+hdpnFRfb/RjJUOh2X3rhIa5P8wQSEX4= -cloud.google.com/go/dialogflow v1.49.0/go.mod h1:dhVrXKETtdPlpPhE7+2/k4Z8FRNUp6kMV3EW3oz/fe0= cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI= -cloud.google.com/go/dlp v1.10.2/go.mod h1:ZbdKIhcnyhILgccwVDzkwqybthh7+MplGC3kZVZsIOQ= -cloud.google.com/go/dlp v1.10.3/go.mod h1:iUaTc/ln8I+QT6Ai5vmuwfw8fqTk2kaz0FvCwhLCom0= -cloud.google.com/go/dlp v1.11.1/go.mod h1:/PA2EnioBeXTL/0hInwgj0rfsQb3lpE3R8XUJxqUNKI= 
-cloud.google.com/go/dlp v1.11.2/go.mod h1:9Czi+8Y/FegpWzgSfkRlyz+jwW6Te9Rv26P3UfU/h/w= cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= @@ -545,67 +323,34 @@ cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFv cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= cloud.google.com/go/documentai v1.20.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= -cloud.google.com/go/documentai v1.22.1/go.mod h1:LKs22aDHbJv7ufXuPypzRO7rG3ALLJxzdCXDPutw4Qc= -cloud.google.com/go/documentai v1.23.0/go.mod h1:LKs22aDHbJv7ufXuPypzRO7rG3ALLJxzdCXDPutw4Qc= -cloud.google.com/go/documentai v1.23.2/go.mod h1:Q/wcRT+qnuXOpjAkvOV4A+IeQl04q2/ReT7SSbytLSo= -cloud.google.com/go/documentai v1.23.4/go.mod h1:4MYAaEMnADPN1LPN5xboDR5QVB6AgsaxgFdJhitlE2Y= -cloud.google.com/go/documentai v1.23.5/go.mod h1:ghzBsyVTiVdkfKaUCum/9bGBEyBjDO4GfooEcYKhN+g= -cloud.google.com/go/documentai v1.23.6/go.mod h1:ghzBsyVTiVdkfKaUCum/9bGBEyBjDO4GfooEcYKhN+g= -cloud.google.com/go/documentai v1.23.7/go.mod h1:ghzBsyVTiVdkfKaUCum/9bGBEyBjDO4GfooEcYKhN+g= -cloud.google.com/go/documentai v1.23.8/go.mod h1:Vd/y5PosxCpUHmwC+v9arZyeMfTqBR9VIwOwIqQYYfA= -cloud.google.com/go/documentai v1.25.0/go.mod h1:ftLnzw5VcXkLItp6pw1mFic91tMRyfv6hHEY5br4KzY= cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE= -cloud.google.com/go/domains v0.9.2/go.mod h1:3YvXGYzZG1Temjbk7EyGCuGGiXHJwVNmwIf+E/cUp5I= -cloud.google.com/go/domains v0.9.3/go.mod h1:29k66YNDLDY9LCFKpGFeh6Nj9r62ZKm5EsUJxAl84KU= -cloud.google.com/go/domains v0.9.4/go.mod h1:27jmJGShuXYdUNjyDG0SodTfT5RwLi7xmH334Gvi3fY= -cloud.google.com/go/domains v0.9.5/go.mod h1:dBzlxgepazdFhvG7u23XMhmMKBjrkoUNaw0A8AQB55Y= cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk= -cloud.google.com/go/edgecontainer v1.1.2/go.mod h1:wQRjIzqxEs9e9wrtle4hQPSR1Y51kqN75dgF7UllZZ4= -cloud.google.com/go/edgecontainer v1.1.3/go.mod h1:Ll2DtIABzEfaxaVSbwj3QHFaOOovlDFiWVDu349jSsA= -cloud.google.com/go/edgecontainer v1.1.4/go.mod h1:AvFdVuZuVGdgaE5YvlL1faAoa1ndRR/5XhXZvPBHbsE= -cloud.google.com/go/edgecontainer v1.1.5/go.mod h1:rgcjrba3DEDEQAidT4yuzaKWTbkTI5zAMu3yy6ZWS0M= cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= cloud.google.com/go/essentialcontacts v1.5.0/go.mod 
h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4= -cloud.google.com/go/essentialcontacts v1.6.3/go.mod h1:yiPCD7f2TkP82oJEFXFTou8Jl8L6LBRPeBEkTaO0Ggo= -cloud.google.com/go/essentialcontacts v1.6.4/go.mod h1:iju5Vy3d9tJUg0PYMd1nHhjV7xoCXaOAVabrwLaPBEM= -cloud.google.com/go/essentialcontacts v1.6.5/go.mod h1:jjYbPzw0x+yglXC890l6ECJWdYeZ5dlYACTFL0U/VuM= -cloud.google.com/go/essentialcontacts v1.6.6/go.mod h1:XbqHJGaiH0v2UvtuucfOzFXN+rpL/aU5BCZLn4DYl1Q= cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= cloud.google.com/go/eventarc v1.12.1/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= -cloud.google.com/go/eventarc v1.13.1/go.mod h1:EqBxmGHFrruIara4FUQ3RHlgfCn7yo1HYsu2Hpt/C3Y= -cloud.google.com/go/eventarc v1.13.2/go.mod h1:X9A80ShVu19fb4e5sc/OLV7mpFUKZMwfJFeeWhcIObM= -cloud.google.com/go/eventarc v1.13.3/go.mod h1:RWH10IAZIRcj1s/vClXkBgMHwh59ts7hSWcqD3kaclg= -cloud.google.com/go/eventarc v1.13.4/go.mod h1:zV5sFVoAa9orc/52Q+OuYUG9xL2IIZTbbuTHC6JSY8s= cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4= -cloud.google.com/go/filestore v1.7.2/go.mod h1:TYOlyJs25f/omgj+vY7/tIG/E7BX369triSPzE4LdgE= -cloud.google.com/go/filestore v1.7.3/go.mod h1:Qp8WaEERR3cSkxToxFPHh/b8AACkSut+4qlCjAmKTV0= -cloud.google.com/go/filestore v1.7.4/go.mod h1:S5JCxIbFjeBhWMTfIYH2Jx24J6BqjwpkkPl+nBA5DlI= -cloud.google.com/go/filestore v1.8.0/go.mod h1:S5JCxIbFjeBhWMTfIYH2Jx24J6BqjwpkkPl+nBA5DlI= -cloud.google.com/go/filestore v1.8.1/go.mod h1:MbN9KcaM47DRTIuLfQhJEsjaocVebNtNQhSLhKCF5GM= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= cloud.google.com/go/firestore v1.11.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= -cloud.google.com/go/firestore v1.12.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= -cloud.google.com/go/firestore v1.13.0/go.mod h1:QojqqOh8IntInDUSTAh0c8ZsPYAr68Ma8c5DWOy8xb8= -cloud.google.com/go/firestore v1.14.0/go.mod h1:96MVaHLsEhbvkBEdZgfN+AS/GIkco1LRpH9Xp9YZfzQ= cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= @@ -614,10 +359,6 @@ cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1Yb cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA= cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= cloud.google.com/go/functions 
v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE= -cloud.google.com/go/functions v1.15.2/go.mod h1:CHAjtcR6OU4XF2HuiVeriEdELNcnvRZSk1Q8RMqy4lE= -cloud.google.com/go/functions v1.15.3/go.mod h1:r/AMHwBheapkkySEhiZYLDBwVJCdlRwsm4ieJu35/Ug= -cloud.google.com/go/functions v1.15.4/go.mod h1:CAsTc3VlRMVvx+XqXxKqVevguqJpnVip4DdonFsX28I= -cloud.google.com/go/functions v1.16.0/go.mod h1:nbNpfAG7SG7Duw/o1iZ6ohvL7mc6MapWQVpqtM29n8k= cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= @@ -628,49 +369,26 @@ cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= -cloud.google.com/go/gkebackup v1.3.1/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= -cloud.google.com/go/gkebackup v1.3.2/go.mod h1:OMZbXzEJloyXMC7gqdSB+EOEQ1AKcpGYvO3s1ec5ixk= -cloud.google.com/go/gkebackup v1.3.3/go.mod h1:eMk7/wVV5P22KBakhQnJxWSVftL1p4VBFLpv0kIft7I= -cloud.google.com/go/gkebackup v1.3.4/go.mod h1:gLVlbM8h/nHIs09ns1qx3q3eaXcGSELgNu1DWXYz1HI= -cloud.google.com/go/gkebackup v1.3.5/go.mod h1:KJ77KkNN7Wm1LdMopOelV6OodM01pMuK2/5Zt1t4Tvc= cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw= -cloud.google.com/go/gkeconnect v0.8.2/go.mod h1:6nAVhwchBJYgQCXD2pHBFQNiJNyAd/wyxljpaa6ZPrY= -cloud.google.com/go/gkeconnect v0.8.3/go.mod h1:i9GDTrfzBSUZGCe98qSu1B8YB8qfapT57PenIb820Jo= -cloud.google.com/go/gkeconnect v0.8.4/go.mod h1:84hZz4UMlDCKl8ifVW8layK4WHlMAFeq8vbzjU0yJkw= -cloud.google.com/go/gkeconnect v0.8.5/go.mod h1:LC/rS7+CuJ5fgIbXv8tCD/mdfnlAadTaUufgOkmijuk= cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY= -cloud.google.com/go/gkehub v0.14.2/go.mod h1:iyjYH23XzAxSdhrbmfoQdePnlMj2EWcvnR+tHdBQsCY= -cloud.google.com/go/gkehub v0.14.3/go.mod h1:jAl6WafkHHW18qgq7kqcrXYzN08hXeK/Va3utN8VKg8= -cloud.google.com/go/gkehub v0.14.4/go.mod h1:Xispfu2MqnnFt8rV/2/3o73SK1snL8s9dYJ9G2oQMfc= -cloud.google.com/go/gkehub v0.14.5/go.mod h1:6bzqxM+a+vEH/h8W8ec4OJl4r36laxTs3A/fMNHJ0wA= cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= cloud.google.com/go/gkemulticloud v0.6.1/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= cloud.google.com/go/gkemulticloud v1.0.0/go.mod 
h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= -cloud.google.com/go/gkemulticloud v1.0.1/go.mod h1:AcrGoin6VLKT/fwZEYuqvVominLriQBCKmbjtnbMjG8= -cloud.google.com/go/gkemulticloud v1.0.2/go.mod h1:+ee5VXxKb3H1l4LZAcgWB/rvI16VTNTrInWxDjAGsGo= -cloud.google.com/go/gkemulticloud v1.0.3/go.mod h1:7NpJBN94U6DY1xHIbsDqB2+TFZUfjLUKLjUX8NGLor0= -cloud.google.com/go/gkemulticloud v1.1.0/go.mod h1:7NpJBN94U6DY1xHIbsDqB2+TFZUfjLUKLjUX8NGLor0= -cloud.google.com/go/gkemulticloud v1.1.1/go.mod h1:C+a4vcHlWeEIf45IB5FFR5XGjTeYhF83+AYIpTy4i2Q= cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= cloud.google.com/go/grafeas v0.3.0/go.mod h1:P7hgN24EyONOTMyeJH6DxG4zD7fwiYa5Q6GUgyFSOU8= -cloud.google.com/go/grafeas v0.3.4/go.mod h1:A5m316hcG+AulafjAbPKXBO/+I5itU4LOdKO2R/uDIc= cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY= -cloud.google.com/go/gsuiteaddons v1.6.2/go.mod h1:K65m9XSgs8hTF3X9nNTPi8IQueljSdYo9F+Mi+s4MyU= -cloud.google.com/go/gsuiteaddons v1.6.3/go.mod h1:sCFJkZoMrLZT3JTb8uJqgKPNshH2tfXeCwTFRebTq48= -cloud.google.com/go/gsuiteaddons v1.6.4/go.mod h1:rxtstw7Fx22uLOXBpsvb9DUbC+fiXs7rF4U29KHM/pE= -cloud.google.com/go/gsuiteaddons v1.6.5/go.mod h1:Lo4P2IvO8uZ9W+RaC6s1JVxo42vgy+TX5a6hfBZ0ubs= cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= @@ -683,41 +401,23 @@ cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCta cloud.google.com/go/iam v1.0.1/go.mod h1:yR3tmSL8BcZB4bxByRv2jkSIahVmCtfKZwLYGBalRE8= cloud.google.com/go/iam v1.1.0/go.mod h1:nxdHjaKfCr7fNYx/HJMM8LgiMugmveWlkatear5gVyk= cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= -cloud.google.com/go/iam v1.1.2/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= -cloud.google.com/go/iam v1.1.3/go.mod h1:3khUlaBXfPKKe7huYgEpDn6FtgRyMEqbkvBxrQyY5SE= -cloud.google.com/go/iam v1.1.4/go.mod h1:l/rg8l1AaA+VFMho/HYx2Vv6xinPSLMF8qfhRPIZ0L8= -cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= -cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI= -cloud.google.com/go/iam v1.1.7 h1:z4VHOhwKLF/+UYXAJDFwGtNF0b6gjsW1Pk9Ml0U/IoM= -cloud.google.com/go/iam v1.1.7/go.mod h1:J4PMPg8TtyurAUvSmPj8FF3EDgY1SPRZxcUGrn7WXGA= +cloud.google.com/go/iam v1.1.12 h1:JixGLimRrNGcxvJEQ8+clfLxPlbeZA6MuRJ+qJNQ5Xw= +cloud.google.com/go/iam v1.1.12/go.mod h1:9LDX8J7dN5YRyzVHxwQzrQs9opFFqn0Mxs9nAeB+Hhg= cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo= cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ= -cloud.google.com/go/iap v1.9.0/go.mod 
h1:01OFxd1R+NFrg78S+hoPV5PxEzv22HXaNqUUlmNHFuY= -cloud.google.com/go/iap v1.9.1/go.mod h1:SIAkY7cGMLohLSdBR25BuIxO+I4fXJiL06IBL7cy/5Q= -cloud.google.com/go/iap v1.9.2/go.mod h1:GwDTOs047PPSnwRD0Us5FKf4WDRcVvHg1q9WVkKBhdI= -cloud.google.com/go/iap v1.9.3/go.mod h1:DTdutSZBqkkOm2HEOTBzhZxh2mwwxshfD/h3yofAiCw= -cloud.google.com/go/iap v1.9.4/go.mod h1:vO4mSq0xNf/Pu6E5paORLASBwEmphXEjgCFg7aeNu1w= cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw= -cloud.google.com/go/ids v1.4.2/go.mod h1:3vw8DX6YddRu9BncxuzMyWn0g8+ooUjI2gslJ7FH3vk= -cloud.google.com/go/ids v1.4.3/go.mod h1:9CXPqI3GedjmkjbMWCUhMZ2P2N7TUMzAkVXYEH2orYU= -cloud.google.com/go/ids v1.4.4/go.mod h1:z+WUc2eEl6S/1aZWzwtVNWoSZslgzPxAboS0lZX0HjI= -cloud.google.com/go/ids v1.4.5/go.mod h1:p0ZnyzjMWxww6d2DvMGnFwCsSxDJM666Iir1bK1UuBo= cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk= -cloud.google.com/go/iot v1.7.2/go.mod h1:q+0P5zr1wRFpw7/MOgDXrG/HVA+l+cSwdObffkrpnSg= -cloud.google.com/go/iot v1.7.3/go.mod h1:t8itFchkol4VgNbHnIq9lXoOOtHNR3uAACQMYbN9N4I= -cloud.google.com/go/iot v1.7.4/go.mod h1:3TWqDVvsddYBG++nHSZmluoCAVGr1hAcabbWZNKEZLk= -cloud.google.com/go/iot v1.7.5/go.mod h1:nq3/sqTz3HGaWJi1xNiX7F41ThOzpud67vwk0YsSsqs= cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= @@ -728,82 +428,45 @@ cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyy cloud.google.com/go/kms v1.11.0/go.mod h1:hwdiYC0xjnWsKQQCQQmIQnS9asjYVSK6jtXm+zFqXLM= cloud.google.com/go/kms v1.12.1/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= -cloud.google.com/go/kms v1.15.2/go.mod h1:3hopT4+7ooWRCjc2DxgnpESFxhIraaI2IpAVUEhbT/w= -cloud.google.com/go/kms v1.15.3/go.mod h1:AJdXqHxS2GlPyduM99s9iGqi2nwbviBbhV/hdmt4iOQ= -cloud.google.com/go/kms v1.15.4/go.mod h1:L3Sdj6QTHK8dfwK5D1JLsAyELsNMnd3tAIwGS4ltKpc= -cloud.google.com/go/kms v1.15.5/go.mod h1:cU2H5jnp6G2TDpUGZyqTCoy1n16fbubHZjmVXSMtwDI= -cloud.google.com/go/kms v1.15.6/go.mod h1:yF75jttnIdHfGBoE51AKsD/Yqf+/jICzB9v1s1acsms= -cloud.google.com/go/kms v1.15.7/go.mod h1:ub54lbsa6tDkUwnu4W7Yt1aAIFLnspgh0kPGToDukeI= cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= cloud.google.com/go/language v1.10.1/go.mod 
h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0= -cloud.google.com/go/language v1.11.0/go.mod h1:uDx+pFDdAKTY8ehpWbiXyQdz8tDSYLJbQcXsCkjYyvQ= -cloud.google.com/go/language v1.11.1/go.mod h1:Xyid9MG9WOX3utvDbpX7j3tXDmmDooMyMDqgUVpH17U= -cloud.google.com/go/language v1.12.1/go.mod h1:zQhalE2QlQIxbKIZt54IASBzmZpN/aDASea5zl1l+J4= -cloud.google.com/go/language v1.12.2/go.mod h1:9idWapzr/JKXBBQ4lWqVX/hcadxB194ry20m/bTrhWc= -cloud.google.com/go/language v1.12.3/go.mod h1:evFX9wECX6mksEva8RbRnr/4wi/vKGYnAJrTRXU8+f8= cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= -cloud.google.com/go/lifesciences v0.9.2/go.mod h1:QHEOO4tDzcSAzeJg7s2qwnLM2ji8IRpQl4p6m5Z9yTA= -cloud.google.com/go/lifesciences v0.9.3/go.mod h1:gNGBOJV80IWZdkd+xz4GQj4mbqaz737SCLHn2aRhQKM= -cloud.google.com/go/lifesciences v0.9.4/go.mod h1:bhm64duKhMi7s9jR9WYJYvjAFJwRqNj+Nia7hF0Z7JA= -cloud.google.com/go/lifesciences v0.9.5/go.mod h1:OdBm0n7C0Osh5yZB7j9BXyrMnTRGBJIZonUMxo5CzPw= cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= -cloud.google.com/go/logging v1.8.1/go.mod h1:TJjR+SimHwuC8MZ9cjByQulAMgni+RkXeI3wwctHJEI= -cloud.google.com/go/logging v1.9.0/go.mod h1:1Io0vnZv4onoUnsVUQY3HZ3Igb1nBchky0A0y7BBBhE= cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= cloud.google.com/go/longrunning v0.4.2/go.mod h1:OHrnaYyLUV6oqwh0xiS7e5sLQhP1m0QU9R+WhGDMgIQ= cloud.google.com/go/longrunning v0.5.0/go.mod h1:0JNuqRShmscVAhIACGtskSAWtqtOoPkwP0YF1oVEchc= cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= -cloud.google.com/go/longrunning v0.5.2/go.mod h1:nqo6DQbNV2pXhGDbDMoN2bWz68MjZUzqv2YttZiveCs= -cloud.google.com/go/longrunning v0.5.3/go.mod h1:y/0ga59EYu58J6SHmmQOvekvND2qODbu8ywBBW7EK7Y= -cloud.google.com/go/longrunning v0.5.4/go.mod h1:zqNVncI0BOP8ST6XQD1+VcvuShMmq7+xFSzOL++V0dI= -cloud.google.com/go/longrunning v0.5.5/go.mod h1:WV2LAxD8/rg5Z1cNW6FJ/ZpX4E4VnDnoTk0yawPBB7s= +cloud.google.com/go/longrunning v0.5.11 h1:Havn1kGjz3whCfoD8dxMLP73Ph5w+ODyZB9RUsDxtGk= +cloud.google.com/go/longrunning v0.5.11/go.mod h1:rDn7//lmlfWV1Dx6IB4RatCPenTwwmqXuiP0/RgoEO4= cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak= -cloud.google.com/go/managedidentities v1.6.2/go.mod h1:5c2VG66eCa0WIq6IylRk3TBW83l161zkFvCj28X7jn8= -cloud.google.com/go/managedidentities v1.6.3/go.mod h1:tewiat9WLyFN0Fi7q1fDD5+0N4VUoL0SCX0OTCthZq4= -cloud.google.com/go/managedidentities v1.6.4/go.mod h1:WgyaECfHmF00t/1Uk8Oun3CQ2PGUtjc3e9Alh79wyiM= -cloud.google.com/go/managedidentities v1.6.5/go.mod 
h1:fkFI2PwwyRQbjLxlm5bQ8SjtObFMW3ChBGNqaMcgZjI= cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= cloud.google.com/go/maps v1.3.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= -cloud.google.com/go/maps v1.4.1/go.mod h1:BxSa0BnW1g2U2gNdbq5zikLlHUuHW0GFWh7sgML2kIY= -cloud.google.com/go/maps v1.5.1/go.mod h1:NPMZw1LJwQZYCfz4y+EIw+SI+24A4bpdFJqdKVr0lt4= -cloud.google.com/go/maps v1.6.1/go.mod h1:4+buOHhYXFBp58Zj/K+Lc1rCmJssxxF4pJ5CJnhdz18= -cloud.google.com/go/maps v1.6.2/go.mod h1:4+buOHhYXFBp58Zj/K+Lc1rCmJssxxF4pJ5CJnhdz18= -cloud.google.com/go/maps v1.6.3/go.mod h1:VGAn809ADswi1ASofL5lveOHPnE6Rk/SFTTBx1yuOLw= -cloud.google.com/go/maps v1.6.4/go.mod h1:rhjqRy8NWmDJ53saCfsXQ0LKwBHfi6OSh5wkq6BaMhI= cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig= -cloud.google.com/go/mediatranslation v0.8.2/go.mod h1:c9pUaDRLkgHRx3irYE5ZC8tfXGrMYwNZdmDqKMSfFp8= -cloud.google.com/go/mediatranslation v0.8.3/go.mod h1:F9OnXTy336rteOEywtY7FOqCk+J43o2RF638hkOQl4Y= -cloud.google.com/go/mediatranslation v0.8.4/go.mod h1:9WstgtNVAdN53m6TQa5GjIjLqKQPXe74hwSCxUP6nj4= -cloud.google.com/go/mediatranslation v0.8.5/go.mod h1:y7kTHYIPCIfgyLbKncgqouXJtLsU+26hZhHEEy80fSs= cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA= -cloud.google.com/go/memcache v1.10.2/go.mod h1:f9ZzJHLBrmd4BkguIAa/l/Vle6uTHzHokdnzSWOdQ6A= -cloud.google.com/go/memcache v1.10.3/go.mod h1:6z89A41MT2DVAW0P4iIRdu5cmRTsbsFn4cyiIx8gbwo= -cloud.google.com/go/memcache v1.10.4/go.mod h1:v/d8PuC8d1gD6Yn5+I3INzLR01IDn0N4Ym56RgikSI0= -cloud.google.com/go/memcache v1.10.5/go.mod h1:/FcblbNd0FdMsx4natdj+2GWzTq+cjZvMa1I+9QsuMA= cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= @@ -811,25 +474,13 @@ cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= cloud.google.com/go/metastore v1.11.1/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= -cloud.google.com/go/metastore v1.13.0/go.mod h1:URDhpG6XLeh5K+Glq0NOt74OfrPKTwS62gEPZzb5SOk= -cloud.google.com/go/metastore v1.13.1/go.mod 
h1:IbF62JLxuZmhItCppcIfzBBfUFq0DIB9HPDoLgWrVOU= -cloud.google.com/go/metastore v1.13.2/go.mod h1:KS59dD+unBji/kFebVp8XU/quNSyo8b6N6tPGspKszA= -cloud.google.com/go/metastore v1.13.3/go.mod h1:K+wdjXdtkdk7AQg4+sXS8bRrQa9gcOr+foOMF2tqINE= -cloud.google.com/go/metastore v1.13.4/go.mod h1:FMv9bvPInEfX9Ac1cVcRXp8EBBQnBcqH6gz3KvJ9BAE= cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= -cloud.google.com/go/monitoring v1.16.0/go.mod h1:Ptp15HgAyM1fNICAojDMoNc/wUmn67mLHQfyqbw+poY= -cloud.google.com/go/monitoring v1.16.1/go.mod h1:6HsxddR+3y9j+o/cMJH6q/KJ/CBTvM/38L/1m7bTRJ4= -cloud.google.com/go/monitoring v1.16.2/go.mod h1:B44KGwi4ZCF8Rk/5n+FWeispDXoKSk9oss2QNlXJBgc= -cloud.google.com/go/monitoring v1.16.3/go.mod h1:KwSsX5+8PnXv5NJnICZzW2R8pWTis8ypC4zmdRD63Tw= -cloud.google.com/go/monitoring v1.17.0/go.mod h1:KwSsX5+8PnXv5NJnICZzW2R8pWTis8ypC4zmdRD63Tw= -cloud.google.com/go/monitoring v1.17.1/go.mod h1:SJzPMakCF0GHOuKEH/r4hxVKF04zl+cRPQyc3d/fqII= -cloud.google.com/go/monitoring v1.18.0/go.mod h1:c92vVBCeq/OB4Ioyo+NbN2U7tlg5ZH41PZcdvfc+Lcg= -cloud.google.com/go/monitoring v1.18.2 h1:nIQdZdf1/9M0cEmXSlLB2jMq/k3CRh9p3oUzS06VDG8= -cloud.google.com/go/monitoring v1.18.2/go.mod h1:MuL95M6d9HtXQOaWP9JxhFZJKP+fdTF0Gt5xl4IDsew= +cloud.google.com/go/monitoring v1.20.3 h1:v/7MXFxYrhXLEZ9sSfwXdlTLLB/xrU7xTyYjY5acynQ= +cloud.google.com/go/monitoring v1.20.3/go.mod h1:GPIVIdNznIdGqEjtRKQWTLcUeRnPjZW85szouimiczU= cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= @@ -837,30 +488,15 @@ cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5Mp cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E= -cloud.google.com/go/networkconnectivity v1.13.0/go.mod h1:SAnGPes88pl7QRLUen2HmcBSE9AowVAcdug8c0RSBFk= -cloud.google.com/go/networkconnectivity v1.14.0/go.mod h1:SAnGPes88pl7QRLUen2HmcBSE9AowVAcdug8c0RSBFk= -cloud.google.com/go/networkconnectivity v1.14.1/go.mod h1:LyGPXR742uQcDxZ/wv4EI0Vu5N6NKJ77ZYVnDe69Zug= -cloud.google.com/go/networkconnectivity v1.14.2/go.mod h1:5UFlwIisZylSkGG1AdwK/WZUaoz12PKu6wODwIbFzJo= -cloud.google.com/go/networkconnectivity v1.14.3/go.mod h1:4aoeFdrJpYEXNvrnfyD5kIzs8YtHg945Og4koAjHQek= -cloud.google.com/go/networkconnectivity v1.14.4/go.mod h1:PU12q++/IMnDJAB+3r+tJtuCXCfwfN+C6Niyj6ji1Po= cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= cloud.google.com/go/networkmanagement v1.8.0/go.mod 
h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0= -cloud.google.com/go/networkmanagement v1.9.0/go.mod h1:UTUaEU9YwbCAhhz3jEOHr+2/K/MrBk2XxOLS89LQzFw= -cloud.google.com/go/networkmanagement v1.9.1/go.mod h1:CCSYgrQQvW73EJawO2QamemYcOb57LvrDdDU51F0mcI= -cloud.google.com/go/networkmanagement v1.9.2/go.mod h1:iDGvGzAoYRghhp4j2Cji7sF899GnfGQcQRQwgVOWnDw= -cloud.google.com/go/networkmanagement v1.9.3/go.mod h1:y7WMO1bRLaP5h3Obm4tey+NquUvB93Co1oh4wpL+XcU= -cloud.google.com/go/networkmanagement v1.9.4/go.mod h1:daWJAl0KTFytFL7ar33I6R/oNBH8eEOX/rBNHrC/8TA= cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ= -cloud.google.com/go/networksecurity v0.9.2/go.mod h1:jG0SeAttWzPMUILEHDUvFYdQTl8L/E/KC8iZDj85lEI= -cloud.google.com/go/networksecurity v0.9.3/go.mod h1:l+C0ynM6P+KV9YjOnx+kk5IZqMSLccdBqW6GUoF4p/0= -cloud.google.com/go/networksecurity v0.9.4/go.mod h1:E9CeMZ2zDsNBkr8axKSYm8XyTqNhiCHf1JO/Vb8mD1w= -cloud.google.com/go/networksecurity v0.9.5/go.mod h1:KNkjH/RsylSGyyZ8wXpue8xpCEK+bTtvof8SBfIhMG8= cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= @@ -868,38 +504,19 @@ cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vu cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8= -cloud.google.com/go/notebooks v1.10.0/go.mod h1:SOPYMZnttHxqot0SGSFSkRrwE29eqnKPBJFqgWmiK2k= -cloud.google.com/go/notebooks v1.10.1/go.mod h1:5PdJc2SgAybE76kFQCWrTfJolCOUQXF97e+gteUUA6A= -cloud.google.com/go/notebooks v1.11.1/go.mod h1:V2Zkv8wX9kDCGRJqYoI+bQAaoVeE5kSiz4yYHd2yJwQ= -cloud.google.com/go/notebooks v1.11.2/go.mod h1:z0tlHI/lREXC8BS2mIsUeR3agM1AkgLiS+Isov3SS70= -cloud.google.com/go/notebooks v1.11.3/go.mod h1:0wQyI2dQC3AZyQqWnRsp+yA+kY4gC7ZIVP4Qg3AQcgo= cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk= -cloud.google.com/go/optimization v1.5.0/go.mod h1:evo1OvTxeBRBu6ydPlrIRizKY/LJKo/drDMMRKqGEUU= -cloud.google.com/go/optimization v1.5.1/go.mod h1:NC0gnUD5MWVAF7XLdoYVPmYYVth93Q6BUzqAq3ZwtV8= -cloud.google.com/go/optimization v1.6.1/go.mod h1:hH2RYPTTM9e9zOiTaYPTiGPcGdNZVnBSBxjIAJzUkqo= -cloud.google.com/go/optimization v1.6.2/go.mod h1:mWNZ7B9/EyMCcwNl1frUGEuY6CPijSkz88Fz2vwKPOY= -cloud.google.com/go/optimization v1.6.3/go.mod h1:8ve3svp3W6NFcAEFr4SfJxrldzhUl4VMUJmhrqVKtYA= cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= cloud.google.com/go/orchestration 
v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8= -cloud.google.com/go/orchestration v1.8.2/go.mod h1:T1cP+6WyTmh6LSZzeUhvGf0uZVmJyTx7t8z7Vg87+A0= -cloud.google.com/go/orchestration v1.8.3/go.mod h1:xhgWAYqlbYjlz2ftbFghdyqENYW+JXuhBx9KsjMoGHs= -cloud.google.com/go/orchestration v1.8.4/go.mod h1:d0lywZSVYtIoSZXb0iFjv9SaL13PGyVOKDxqGxEf/qI= -cloud.google.com/go/orchestration v1.8.5/go.mod h1:C1J7HesE96Ba8/hZ71ISTV2UAat0bwN+pi85ky38Yq8= cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= cloud.google.com/go/orgpolicy v1.11.0/go.mod h1:2RK748+FtVvnfuynxBzdnyu7sygtoZa1za/0ZfpOs1M= cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE= -cloud.google.com/go/orgpolicy v1.11.2/go.mod h1:biRDpNwfyytYnmCRWZWxrKF22Nkz9eNVj9zyaBdpm1o= -cloud.google.com/go/orgpolicy v1.11.3/go.mod h1:oKAtJ/gkMjum5icv2aujkP4CxROxPXsBbYGCDbPO8MM= -cloud.google.com/go/orgpolicy v1.11.4/go.mod h1:0+aNV/nrfoTQ4Mytv+Aw+stBDBjNf4d8fYRA9herfJI= -cloud.google.com/go/orgpolicy v1.12.0/go.mod h1:0+aNV/nrfoTQ4Mytv+Aw+stBDBjNf4d8fYRA9herfJI= -cloud.google.com/go/orgpolicy v1.12.1/go.mod h1:aibX78RDl5pcK3jA8ysDQCFkVxLj3aOQqrbBaUL2V5I= cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= @@ -907,50 +524,27 @@ cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= cloud.google.com/go/osconfig v1.12.0/go.mod h1:8f/PaYzoS3JMVfdfTubkowZYGmAhUCjjwnjqWI7NVBc= cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE= -cloud.google.com/go/osconfig v1.12.2/go.mod h1:eh9GPaMZpI6mEJEuhEjUJmaxvQ3gav+fFEJon1Y8Iw0= -cloud.google.com/go/osconfig v1.12.3/go.mod h1:L/fPS8LL6bEYUi1au832WtMnPeQNT94Zo3FwwV1/xGM= -cloud.google.com/go/osconfig v1.12.4/go.mod h1:B1qEwJ/jzqSRslvdOCI8Kdnp0gSng0xW4LOnIebQomA= -cloud.google.com/go/osconfig v1.12.5/go.mod h1:D9QFdxzfjgw3h/+ZaAb5NypM8bhOMqBzgmbhzWViiW8= cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs= -cloud.google.com/go/oslogin v1.11.0/go.mod h1:8GMTJs4X2nOAUVJiPGqIWVcDaF0eniEto3xlOxaboXE= -cloud.google.com/go/oslogin v1.11.1/go.mod h1:OhD2icArCVNUxKqtK0mcSmKL7lgr0LVlQz+v9s1ujTg= -cloud.google.com/go/oslogin v1.12.1/go.mod h1:VfwTeFJGbnakxAY236eN8fsnglLiVXndlbcNomY4iZU= -cloud.google.com/go/oslogin v1.12.2/go.mod h1:CQ3V8Jvw4Qo4WRhNPF0o+HAM4DiLuE27Ul9CX9g2QdY= -cloud.google.com/go/oslogin v1.13.0/go.mod 
h1:xPJqLwpTZ90LSE5IL1/svko+6c5avZLluiyylMb/sRA= -cloud.google.com/go/oslogin v1.13.1/go.mod h1:vS8Sr/jR7QvPWpCjNqy6LYZr5Zs1e8ZGW/KPn9gmhws= cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I= -cloud.google.com/go/phishingprotection v0.8.2/go.mod h1:LhJ91uyVHEYKSKcMGhOa14zMMWfbEdxG032oT6ECbC8= -cloud.google.com/go/phishingprotection v0.8.3/go.mod h1:3B01yO7T2Ra/TMojifn8EoGd4G9jts/6cIO0DgDY9J8= -cloud.google.com/go/phishingprotection v0.8.4/go.mod h1:6b3kNPAc2AQ6jZfFHioZKg9MQNybDg4ixFd4RPZZ2nE= -cloud.google.com/go/phishingprotection v0.8.5/go.mod h1:g1smd68F7mF1hgQPuYn3z8HDbNre8L6Z0b7XMYFmX7I= cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw= cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= cloud.google.com/go/policytroubleshooter v1.7.1/go.mod h1:0NaT5v3Ag1M7U5r0GfDCpUFkWd9YqpubBWsQlhanRv0= cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU= -cloud.google.com/go/policytroubleshooter v1.9.0/go.mod h1:+E2Lga7TycpeSTj2FsH4oXxTnrbHJGRlKhVZBLGgU64= -cloud.google.com/go/policytroubleshooter v1.9.1/go.mod h1:MYI8i0bCrL8cW+VHN1PoiBTyNZTstCg2WUw2eVC4c4U= -cloud.google.com/go/policytroubleshooter v1.10.1/go.mod h1:5C0rhT3TDZVxAu8813bwmTvd57Phbl8mr9F4ipOsxEs= -cloud.google.com/go/policytroubleshooter v1.10.2/go.mod h1:m4uF3f6LseVEnMV6nknlN2vYGRb+75ylQwJdnOXfnv0= -cloud.google.com/go/policytroubleshooter v1.10.3/go.mod h1:+ZqG3agHT7WPb4EBIRqUv4OyIwRTZvsVDHZ8GlZaoxk= cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg= cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA= -cloud.google.com/go/privatecatalog v0.9.2/go.mod h1:RMA4ATa8IXfzvjrhhK8J6H4wwcztab+oZph3c6WmtFc= -cloud.google.com/go/privatecatalog v0.9.3/go.mod h1:K5pn2GrVmOPjXz3T26mzwXLcKivfIJ9R5N79AFCF9UE= -cloud.google.com/go/privatecatalog v0.9.4/go.mod h1:SOjm93f+5hp/U3PqMZAHTtBtluqLygrDrVO8X8tYtG0= -cloud.google.com/go/privatecatalog v0.9.5/go.mod h1:fVWeBOVe7uj2n3kWRGlUQqR/pOd450J9yZoOECcQqJk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -961,8 +555,6 @@ cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9 cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= cloud.google.com/go/pubsub v1.32.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= cloud.google.com/go/pubsub 
v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= -cloud.google.com/go/pubsub v1.34.0/go.mod h1:alj4l4rBg+N3YTFDDC+/YyFTs6JAjam2QfYsddcAW4c= -cloud.google.com/go/pubsub v1.36.1/go.mod h1:iYjCa9EzWOoBiTdd4ps7QoMtMln5NwaZQpK1hbRfBDE= cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= @@ -976,82 +568,43 @@ cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91j cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA= cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU= -cloud.google.com/go/recaptchaenterprise/v2 v2.8.0/go.mod h1:QuE8EdU9dEnesG8/kG3XuJyNsjEqMlMzg3v3scCJ46c= -cloud.google.com/go/recaptchaenterprise/v2 v2.8.1/go.mod h1:JZYZJOeZjgSSTGP4uz7NlQ4/d1w5hGmksVgM0lbEij0= -cloud.google.com/go/recaptchaenterprise/v2 v2.8.2/go.mod h1:kpaDBOpkwD4G0GVMzG1W6Doy1tFFC97XAV3xy+Rd/pw= -cloud.google.com/go/recaptchaenterprise/v2 v2.8.3/go.mod h1:Dak54rw6lC2gBY8FBznpOCAR58wKf+R+ZSJRoeJok4w= -cloud.google.com/go/recaptchaenterprise/v2 v2.8.4/go.mod h1:Dak54rw6lC2gBY8FBznpOCAR58wKf+R+ZSJRoeJok4w= -cloud.google.com/go/recaptchaenterprise/v2 v2.9.0/go.mod h1:Dak54rw6lC2gBY8FBznpOCAR58wKf+R+ZSJRoeJok4w= -cloud.google.com/go/recaptchaenterprise/v2 v2.9.2/go.mod h1:trwwGkfhCmp05Ll5MSJPXY7yvnO0p4v3orGANAFHAuU= cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE= -cloud.google.com/go/recommendationengine v0.8.2/go.mod h1:QIybYHPK58qir9CV2ix/re/M//Ty10OxjnnhWdaKS1Y= -cloud.google.com/go/recommendationengine v0.8.3/go.mod h1:m3b0RZV02BnODE9FeSvGv1qibFo8g0OnmB/RMwYy4V8= -cloud.google.com/go/recommendationengine v0.8.4/go.mod h1:GEteCf1PATl5v5ZsQ60sTClUE0phbWmo3rQ1Js8louU= -cloud.google.com/go/recommendationengine v0.8.5/go.mod h1:A38rIXHGFvoPvmy6pZLozr0g59NRNREz4cx7F58HAsQ= cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA= -cloud.google.com/go/recommender v1.11.0/go.mod h1:kPiRQhPyTJ9kyXPCG6u/dlPLbYfFlkwHNRwdzPVAoII= -cloud.google.com/go/recommender v1.11.1/go.mod h1:sGwFFAyI57v2Hc5LbIj+lTwXipGu9NW015rkaEM5B18= -cloud.google.com/go/recommender v1.11.2/go.mod h1:AeoJuzOvFR/emIcXdVFkspVXVTYpliRCmKNYDnyBv6Y= -cloud.google.com/go/recommender v1.11.3/go.mod h1:+FJosKKJSId1MBFeJ/TTyoGQZiEelQQIZMKYYD8ruK4= -cloud.google.com/go/recommender v1.12.0/go.mod 
h1:+FJosKKJSId1MBFeJ/TTyoGQZiEelQQIZMKYYD8ruK4= -cloud.google.com/go/recommender v1.12.1/go.mod h1:gf95SInWNND5aPas3yjwl0I572dtudMhMIG4ni8nr+0= cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg= -cloud.google.com/go/redis v1.13.2/go.mod h1:0Hg7pCMXS9uz02q+LoEVl5dNHUkIQv+C/3L76fandSA= -cloud.google.com/go/redis v1.13.3/go.mod h1:vbUpCKUAZSYzFcWKmICnYgRAhTFg9r+djWqFxDYXi4U= -cloud.google.com/go/redis v1.14.1/go.mod h1:MbmBxN8bEnQI4doZPC1BzADU4HGocHBk2de3SbgOkqs= -cloud.google.com/go/redis v1.14.2/go.mod h1:g0Lu7RRRz46ENdFKQ2EcQZBAJ2PtJHJLuiiRuEXwyQw= cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots= cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo= cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8= -cloud.google.com/go/resourcemanager v1.9.2/go.mod h1:OujkBg1UZg5lX2yIyMo5Vz9O5hf7XQOSV7WxqxxMtQE= -cloud.google.com/go/resourcemanager v1.9.3/go.mod h1:IqrY+g0ZgLsihcfcmqSe+RKp1hzjXwG904B92AwBz6U= -cloud.google.com/go/resourcemanager v1.9.4/go.mod h1:N1dhP9RFvo3lUfwtfLWVxfUWq8+KUQ+XLlHLH3BoFJ0= -cloud.google.com/go/resourcemanager v1.9.5/go.mod h1:hep6KjelHA+ToEjOfO3garMKi/CLYwTqeAw7YiEI9x8= cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw= -cloud.google.com/go/resourcesettings v1.6.2/go.mod h1:mJIEDd9MobzunWMeniaMp6tzg4I2GvD3TTmPkc8vBXk= -cloud.google.com/go/resourcesettings v1.6.3/go.mod h1:pno5D+7oDYkMWZ5BpPsb4SO0ewg3IXcmmrUZaMJrFic= -cloud.google.com/go/resourcesettings v1.6.4/go.mod h1:pYTTkWdv2lmQcjsthbZLNBP4QW140cs7wqA3DuqErVI= -cloud.google.com/go/resourcesettings v1.6.5/go.mod h1:WBOIWZraXZOGAgoR4ukNj0o0HiSMO62H9RpFi9WjP9I= cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE= -cloud.google.com/go/retail v1.14.2/go.mod h1:W7rrNRChAEChX336QF7bnMxbsjugcOCPU44i5kbLiL8= -cloud.google.com/go/retail v1.14.3/go.mod h1:Omz2akDHeSlfCq8ArPKiBxlnRpKEBjUH386JYFLUvXo= 
-cloud.google.com/go/retail v1.14.4/go.mod h1:l/N7cMtY78yRnJqp5JW8emy7MB1nz8E4t2yfOmklYfg= -cloud.google.com/go/retail v1.15.1/go.mod h1:In9nSBOYhLbDGa87QvWlnE1XA14xBN2FpQRiRsUs9wU= -cloud.google.com/go/retail v1.16.0/go.mod h1:LW7tllVveZo4ReWt68VnldZFWJRzsh9np+01J9dYWzE= cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM= cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo= -cloud.google.com/go/run v1.3.0/go.mod h1:S/osX/4jIPZGg+ssuqh6GNgg7syixKe3YnprwehzHKU= -cloud.google.com/go/run v1.3.1/go.mod h1:cymddtZOzdwLIAsmS6s+Asl4JoXIDm/K1cpZTxV4Q5s= -cloud.google.com/go/run v1.3.2/go.mod h1:SIhmqArbjdU/D9M6JoHaAqnAMKLFtXaVdNeq04NjnVE= -cloud.google.com/go/run v1.3.3/go.mod h1:WSM5pGyJ7cfYyYbONVQBN4buz42zFqwG67Q3ch07iK4= -cloud.google.com/go/run v1.3.4/go.mod h1:FGieuZvQ3tj1e9GnzXqrMABSuir38AJg5xhiYq+SF3o= cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= @@ -1059,22 +612,13 @@ cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJe cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc= cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo= -cloud.google.com/go/scheduler v1.10.2/go.mod h1:O3jX6HRH5eKCA3FutMw375XHZJudNIKVonSCHv7ropY= -cloud.google.com/go/scheduler v1.10.3/go.mod h1:8ANskEM33+sIbpJ+R4xRfw/jzOG+ZFE8WVLy7/yGvbc= -cloud.google.com/go/scheduler v1.10.4/go.mod h1:MTuXcrJC9tqOHhixdbHDFSIuh7xZF2IysiINDuiq6NI= -cloud.google.com/go/scheduler v1.10.5/go.mod h1:MTuXcrJC9tqOHhixdbHDFSIuh7xZF2IysiINDuiq6NI= -cloud.google.com/go/scheduler v1.10.6/go.mod h1:pe2pNCtJ+R01E06XCDOJs1XvAMbv28ZsQEbqknxGOuE= cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw= -cloud.google.com/go/secretmanager v1.11.2/go.mod h1:MQm4t3deoSub7+WNwiC4/tRYgDBHJgJPvswqQVB1Vss= -cloud.google.com/go/secretmanager v1.11.3/go.mod h1:0bA2o6FabmShrEy328i67aV+65XoUFFSmVeLBn/51jI= -cloud.google.com/go/secretmanager v1.11.4/go.mod h1:wreJlbS9Zdq21lMzWmJ0XhWW2ZxgPeahsqeV/vZoJ3w= -cloud.google.com/go/secretmanager v1.11.5/go.mod h1:eAGv+DaCHkeVyQi0BeXgAHOU0RdrMeZIASKc+S7VqH4= -cloud.google.com/go/secretmanager v1.12.0 h1:e5pIo/QEgiFiHPVJPxM5jbtUr4O/u5h2zLHYtkFQr24= -cloud.google.com/go/secretmanager v1.12.0/go.mod h1:Y1Gne3Ag+fZ2TDTiJc8ZJCMFbi7k1rYT4Rw30GXfvlk= +cloud.google.com/go/secretmanager v1.13.5 h1:tXlHvpm97mFD0Lv50N4U4zlXfkoTNay3BmpNA/W7/oI= +cloud.google.com/go/secretmanager v1.13.5/go.mod h1:/OeZ88l5Z6nBVilV0SXgv6XJ243KP2aIhSWRMrbvDCQ= cloud.google.com/go/security 
v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= @@ -1083,10 +627,6 @@ cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8= cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA= -cloud.google.com/go/security v1.15.2/go.mod h1:2GVE/v1oixIRHDaClVbHuPcZwAqFM28mXuAKCfMgYIg= -cloud.google.com/go/security v1.15.3/go.mod h1:gQ/7Q2JYUZZgOzqKtw9McShH+MjNvtDpL40J1cT+vBs= -cloud.google.com/go/security v1.15.4/go.mod h1:oN7C2uIZKhxCLiAAijKUCuHLZbIt/ghYEo8MqwD/Ty4= -cloud.google.com/go/security v1.15.5/go.mod h1:KS6X2eG3ynWjqcIX976fuToN5juVkF6Ra6c7MPnldtc= cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= @@ -1094,11 +634,6 @@ cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZ cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0= cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ= -cloud.google.com/go/securitycenter v1.23.1/go.mod h1:w2HV3Mv/yKhbXKwOCu2i8bCuLtNP1IMHuiYQn4HJq5s= -cloud.google.com/go/securitycenter v1.24.1/go.mod h1:3h9IdjjHhVMXdQnmqzVnM7b0wMn/1O/U20eWVpMpZjI= -cloud.google.com/go/securitycenter v1.24.2/go.mod h1:l1XejOngggzqwr4Fa2Cn+iWZGf+aBLTXtB/vXjy5vXM= -cloud.google.com/go/securitycenter v1.24.3/go.mod h1:l1XejOngggzqwr4Fa2Cn+iWZGf+aBLTXtB/vXjy5vXM= -cloud.google.com/go/securitycenter v1.24.4/go.mod h1:PSccin+o1EMYKcFQzz9HMMnZ2r9+7jbc+LvPjXhpwcU= cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA= @@ -1112,10 +647,6 @@ cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxF cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= cloud.google.com/go/servicedirectory v1.10.1/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= -cloud.google.com/go/servicedirectory v1.11.1/go.mod h1:tJywXimEWzNzw9FvtNjsQxxJ3/41jseeILgwU/QLrGI= -cloud.google.com/go/servicedirectory v1.11.2/go.mod h1:KD9hCLhncWRV5jJphwIpugKwM5bn1x0GyVVD4NO8mGg= -cloud.google.com/go/servicedirectory v1.11.3/go.mod h1:LV+cHkomRLr67YoQy3Xq2tUXBGOs5z5bPofdq7qtiAw= -cloud.google.com/go/servicedirectory v1.11.4/go.mod h1:Bz2T9t+/Ehg6x+Y7Ycq5xiShYLD96NfEsWNHyitj1qM= cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= cloud.google.com/go/servicemanagement v1.6.0/go.mod 
h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc= @@ -1128,23 +659,10 @@ cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IW cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g= -cloud.google.com/go/shell v1.7.2/go.mod h1:KqRPKwBV0UyLickMn0+BY1qIyE98kKyI216sH/TuHmc= -cloud.google.com/go/shell v1.7.3/go.mod h1:cTTEz/JdaBsQAeTQ3B6HHldZudFoYBOqjteev07FbIc= -cloud.google.com/go/shell v1.7.4/go.mod h1:yLeXB8eKLxw0dpEmXQ/FjriYrBijNsONpwnWsdPqlKM= -cloud.google.com/go/shell v1.7.5/go.mod h1:hL2++7F47/IfpfTO53KYf1EC+F56k3ThfNEXd4zcuiE= cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI= -cloud.google.com/go/spanner v1.49.0/go.mod h1:eGj9mQGK8+hkgSVbHNQ06pQ4oS+cyc4tXXd6Dif1KoM= -cloud.google.com/go/spanner v1.50.0/go.mod h1:eGj9mQGK8+hkgSVbHNQ06pQ4oS+cyc4tXXd6Dif1KoM= -cloud.google.com/go/spanner v1.51.0/go.mod h1:c5KNo5LQ1X5tJwma9rSQZsXNBDNvj4/n8BVc3LNahq0= -cloud.google.com/go/spanner v1.53.0/go.mod h1:liG4iCeLqm5L3fFLU5whFITqP0e0orsAW1uUSrd4rws= -cloud.google.com/go/spanner v1.53.1/go.mod h1:liG4iCeLqm5L3fFLU5whFITqP0e0orsAW1uUSrd4rws= -cloud.google.com/go/spanner v1.54.0/go.mod h1:wZvSQVBgngF0Gq86fKup6KIYmN2be7uOKjtK97X+bQU= -cloud.google.com/go/spanner v1.55.0/go.mod h1:HXEznMUVhC+PC+HDyo9YFG2Ajj5BQDkcbqB9Z2Ffxi0= -cloud.google.com/go/spanner v1.56.0/go.mod h1:DndqtUKQAt3VLuV2Le+9Y3WTnq5cNKrnLb/Piqcj+h0= -cloud.google.com/go/spanner v1.57.0/go.mod h1:aXQ5QDdhPRIqVhYmnkAdwPYvj/DRN0FguclhEWw+jOo= cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= @@ -1153,11 +671,6 @@ cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDF cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= cloud.google.com/go/speech v1.17.1/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= -cloud.google.com/go/speech v1.19.1/go.mod h1:WcuaWz/3hOlzPFOVo9DUsblMIHwxP589y6ZMtaG+iAA= -cloud.google.com/go/speech v1.19.2/go.mod h1:2OYFfj+Ch5LWjsaSINuCZsre/789zlcCI3SY4oAi2oI= -cloud.google.com/go/speech v1.20.1/go.mod h1:wwolycgONvfz2EDU8rKuHRW3+wc9ILPsAWoikBEWavY= -cloud.google.com/go/speech v1.21.0/go.mod h1:wwolycgONvfz2EDU8rKuHRW3+wc9ILPsAWoikBEWavY= -cloud.google.com/go/speech v1.21.1/go.mod h1:E5GHZXYQlkqWQwY5xRSLHw2ci5NMQNG52FfMU1aZrIA= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= @@ -1170,54 +683,32 @@ cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= cloud.google.com/go/storage 
v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E= -cloud.google.com/go/storage v1.36.0/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8= -cloud.google.com/go/storage v1.37.0/go.mod h1:i34TiT2IhiNDmcj65PqwCjcoUX7Z5pLzS8DEmoiFq1k= -cloud.google.com/go/storage v1.40.0 h1:VEpDQV5CJxFmJ6ueWNsKxcr1QAYOXEgxDa+sBbJahPw= -cloud.google.com/go/storage v1.40.0/go.mod h1:Rrj7/hKlG87BLqDJYtwR0fbPld8uJPbQ2ucUMY7Ir0g= +cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= +cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA= -cloud.google.com/go/storagetransfer v1.10.1/go.mod h1:rS7Sy0BtPviWYTTJVWCSV4QrbBitgPeuK4/FKa4IdLs= -cloud.google.com/go/storagetransfer v1.10.2/go.mod h1:meIhYQup5rg9juQJdyppnA/WLQCOguxtk1pr3/vBWzA= -cloud.google.com/go/storagetransfer v1.10.3/go.mod h1:Up8LY2p6X68SZ+WToswpQbQHnJpOty/ACcMafuey8gc= -cloud.google.com/go/storagetransfer v1.10.4/go.mod h1:vef30rZKu5HSEf/x1tK3WfWrL0XVoUQN/EPDRGPzjZs= cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24= -cloud.google.com/go/talent v1.6.3/go.mod h1:xoDO97Qd4AK43rGjJvyBHMskiEf3KulgYzcH6YWOVoo= -cloud.google.com/go/talent v1.6.4/go.mod h1:QsWvi5eKeh6gG2DlBkpMaFYZYrYUnIpo34f6/V5QykY= -cloud.google.com/go/talent v1.6.5/go.mod h1:Mf5cma696HmE+P2BWJ/ZwYqeJXEeU0UqjHFXVLadEDI= -cloud.google.com/go/talent v1.6.6/go.mod h1:y/WQDKrhVz12WagoarpAIyKKMeKGKHWPoReZ0g8tseQ= cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk= -cloud.google.com/go/texttospeech v1.7.2/go.mod h1:VYPT6aTOEl3herQjFHYErTlSZJ4vB00Q2ZTmuVgluD4= -cloud.google.com/go/texttospeech v1.7.3/go.mod h1:Av/zpkcgWfXlDLRYob17lqMstGZ3GqlvJXqKMp2u8so= -cloud.google.com/go/texttospeech v1.7.4/go.mod h1:vgv0002WvR4liGuSd5BJbWy4nDn5Ozco0uJymY5+U74= -cloud.google.com/go/texttospeech v1.7.5/go.mod h1:tzpCuNWPwrNJnEa4Pu5taALuZL4QRRLcb+K9pbhXT6M= cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= cloud.google.com/go/tpu v1.6.1/go.mod 
h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E= -cloud.google.com/go/tpu v1.6.2/go.mod h1:NXh3NDwt71TsPZdtGWgAG5ThDfGd32X1mJ2cMaRlVgU= -cloud.google.com/go/tpu v1.6.3/go.mod h1:lxiueqfVMlSToZY1151IaZqp89ELPSrk+3HIQ5HRkbY= -cloud.google.com/go/tpu v1.6.4/go.mod h1:NAm9q3Rq2wIlGnOhpYICNI7+bpBebMJbh0yyp3aNw1Y= -cloud.google.com/go/tpu v1.6.5/go.mod h1:P9DFOEBIBhuEcZhXi+wPoVy/cji+0ICFi4TtTkMHSSs= cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= -cloud.google.com/go/trace v1.10.2/go.mod h1:NPXemMi6MToRFcSxRl2uDnu/qAlAQ3oULUphcHGh1vA= -cloud.google.com/go/trace v1.10.3/go.mod h1:Ke1bgfc73RV3wUFml+uQp7EsDw4dGaETLxB7Iq/r4CY= -cloud.google.com/go/trace v1.10.4/go.mod h1:Nso99EDIK8Mj5/zmB+iGr9dosS/bzWCJ8wGmE6TXNWY= -cloud.google.com/go/trace v1.10.5/go.mod h1:9hjCV1nGBCtXbAE4YK7OqJ8pmPYSxPA0I67JwRd5s3M= cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0= @@ -1225,12 +716,6 @@ cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= cloud.google.com/go/translate v1.8.1/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= -cloud.google.com/go/translate v1.9.0/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= -cloud.google.com/go/translate v1.9.1/go.mod h1:TWIgDZknq2+JD4iRcojgeDtqGEp154HN/uL6hMvylS8= -cloud.google.com/go/translate v1.9.2/go.mod h1:E3Tc6rUTsQkVrXW6avbUhKJSr7ZE3j7zNmqzXKHqRrY= -cloud.google.com/go/translate v1.9.3/go.mod h1:Kbq9RggWsbqZ9W5YpM94Q1Xv4dshw/gr/SHfsl5yCZ0= -cloud.google.com/go/translate v1.10.0/go.mod h1:Kbq9RggWsbqZ9W5YpM94Q1Xv4dshw/gr/SHfsl5yCZ0= -cloud.google.com/go/translate v1.10.1/go.mod h1:adGZcQNom/3ogU65N9UXHOnnSvjPwA/jKQUMnsYXOyk= cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg= @@ -1239,21 +724,12 @@ cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxE cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= cloud.google.com/go/video v1.17.1/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= -cloud.google.com/go/video v1.20.0/go.mod h1:U3G3FTnsvAGqglq9LxgqzOiBc/Nt8zis8S+850N2DUM= -cloud.google.com/go/video v1.20.1/go.mod h1:3gJS+iDprnj8SY6pe0SwLeC5BUW80NjhwX7INWEuWGU= -cloud.google.com/go/video v1.20.2/go.mod h1:lrixr5JeKNThsgfM9gqtwb6Okuqzfo4VrY2xynaViTA= -cloud.google.com/go/video v1.20.3/go.mod h1:TnH/mNZKVHeNtpamsSPygSR0iHtvrR/cW1/GDjN5+GU= -cloud.google.com/go/video v1.20.4/go.mod h1:LyUVjyW+Bwj7dh3UJnUGZfyqjEto9DnrvTe1f/+QrW0= cloud.google.com/go/videointelligence v1.6.0/go.mod 
h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo= -cloud.google.com/go/videointelligence v1.11.2/go.mod h1:ocfIGYtIVmIcWk1DsSGOoDiXca4vaZQII1C85qtoplc= -cloud.google.com/go/videointelligence v1.11.3/go.mod h1:tf0NUaGTjU1iS2KEkGWvO5hRHeCkFK3nPo0/cOZhZAo= -cloud.google.com/go/videointelligence v1.11.4/go.mod h1:kPBMAYsTPFiQxMLmmjpcZUMklJp3nC9+ipJJtprccD8= -cloud.google.com/go/videointelligence v1.11.5/go.mod h1:/PkeQjpRponmOerPeJxNPuxvi12HlW7Em0lJO14FC3I= cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= @@ -1262,66 +738,36 @@ cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98z cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY= cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU= -cloud.google.com/go/vision/v2 v2.7.3/go.mod h1:V0IcLCY7W+hpMKXK1JYE0LV5llEqVmj+UJChjvA1WsM= -cloud.google.com/go/vision/v2 v2.7.4/go.mod h1:ynDKnsDN/0RtqkKxQZ2iatv3Dm9O+HfRb5djl7l4Vvw= -cloud.google.com/go/vision/v2 v2.7.5/go.mod h1:GcviprJLFfK9OLf0z8Gm6lQb6ZFUulvpZws+mm6yPLM= -cloud.google.com/go/vision/v2 v2.7.6/go.mod h1:ZkvWTVNPBU3YZYzgF9Y1jwEbD1NBOCyJn0KFdQfE6Bw= -cloud.google.com/go/vision/v2 v2.8.0/go.mod h1:ocqDiA2j97pvgogdyhoxiQp2ZkDCyr0HWpicywGGRhU= cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc= cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro= -cloud.google.com/go/vmmigration v1.7.2/go.mod h1:iA2hVj22sm2LLYXGPT1pB63mXHhrH1m/ruux9TwWLd8= -cloud.google.com/go/vmmigration v1.7.3/go.mod h1:ZCQC7cENwmSWlwyTrZcWivchn78YnFniEQYRWQ65tBo= -cloud.google.com/go/vmmigration v1.7.4/go.mod h1:yBXCmiLaB99hEl/G9ZooNx2GyzgsjKnw5fWcINRgD70= -cloud.google.com/go/vmmigration v1.7.5/go.mod h1:pkvO6huVnVWzkFioxSghZxIGcsstDvYiVCxQ9ZH3eYI= cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8= cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= cloud.google.com/go/vmwareengine v0.4.1/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= -cloud.google.com/go/vmwareengine v1.0.1/go.mod h1:aT3Xsm5sNx0QShk1Jc1B8OddrxAScYLwzVoaiXfdzzk= -cloud.google.com/go/vmwareengine v1.0.2/go.mod 
h1:xMSNjIk8/itYrz1JA8nV3Ajg4L4n3N+ugP8JKzk3OaA= -cloud.google.com/go/vmwareengine v1.0.3/go.mod h1:QSpdZ1stlbfKtyt6Iu19M6XRxjmXO+vb5a/R6Fvy2y4= -cloud.google.com/go/vmwareengine v1.1.1/go.mod h1:nMpdsIVkUrSaX8UvmnBhzVzG7PPvNYc5BszcvIVudYs= cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs= -cloud.google.com/go/vpcaccess v1.7.2/go.mod h1:mmg/MnRHv+3e8FJUjeSibVFvQF1cCy2MsFaFqxeY1HU= -cloud.google.com/go/vpcaccess v1.7.3/go.mod h1:YX4skyfW3NC8vI3Fk+EegJnlYFatA+dXK4o236EUCUc= -cloud.google.com/go/vpcaccess v1.7.4/go.mod h1:lA0KTvhtEOb/VOdnH/gwPuOzGgM+CWsmGu6bb4IoMKk= -cloud.google.com/go/vpcaccess v1.7.5/go.mod h1:slc5ZRvvjP78c2dnL7m4l4R9GwL3wDLcpIWz6P/ziig= cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc= -cloud.google.com/go/webrisk v1.9.2/go.mod h1:pY9kfDgAqxUpDBOrG4w8deLfhvJmejKB0qd/5uQIPBc= -cloud.google.com/go/webrisk v1.9.3/go.mod h1:RUYXe9X/wBDXhVilss7EDLW9ZNa06aowPuinUOPCXH8= -cloud.google.com/go/webrisk v1.9.4/go.mod h1:w7m4Ib4C+OseSr2GL66m0zMBywdrVNTDKsdEsfMl7X0= -cloud.google.com/go/webrisk v1.9.5/go.mod h1:aako0Fzep1Q714cPEM5E+mtYX8/jsfegAuS8aivxy3U= cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg= -cloud.google.com/go/websecurityscanner v1.6.2/go.mod h1:7YgjuU5tun7Eg2kpKgGnDuEOXWIrh8x8lWrJT4zfmas= -cloud.google.com/go/websecurityscanner v1.6.3/go.mod h1:x9XANObUFR+83Cya3g/B9M/yoHVqzxPnFtgF8yYGAXw= -cloud.google.com/go/websecurityscanner v1.6.4/go.mod h1:mUiyMQ+dGpPPRkHgknIZeCzSHJ45+fY4F52nZFDHm2o= -cloud.google.com/go/websecurityscanner v1.6.5/go.mod h1:QR+DWaxAz2pWooylsBF854/Ijvuoa3FCyS1zBa1rAVQ= cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g= -cloud.google.com/go/workflows v1.12.0/go.mod h1:PYhSk2b6DhZ508tj8HXKaBh+OFe+xdl0dHF/tJdzPQM= -cloud.google.com/go/workflows v1.12.1/go.mod h1:5A95OhD/edtOhQd/O741NSfIMezNTbCwLM1P1tBRGHM= -cloud.google.com/go/workflows v1.12.2/go.mod h1:+OmBIgNqYJPVggnMo9nqmizW0qEXHhmnAzK/CnBqsHc= 
-cloud.google.com/go/workflows v1.12.3/go.mod h1:fmOUeeqEwPzIU81foMjTRQIdwQHADi/vEr1cx9R1m5g= -cloud.google.com/go/workflows v1.12.4/go.mod h1:yQ7HUqOkdJK4duVtMeBCAOPiN1ZF1E9pAMX51vpwB/w= code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= code.cloudfoundry.org/clock v1.1.0 h1:XLzC6W3Ah/Y7ht1rmZ6+QfPdt1iGWEAAtIZXgiaj57c= code.cloudfoundry.org/clock v1.1.0/go.mod h1:yA3fxddT9RINQL2XHS7PS+OXxKCGhfrZmlNUCIM6AKo= @@ -1334,24 +780,24 @@ gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zum git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= github.com/Azure/azure-amqp-common-go/v4 v4.2.0 h1:q/jLx1KJ8xeI8XGfkOWMN9XrXzAfVTkyvCxPvHCjd2I= github.com/Azure/azure-amqp-common-go/v4 v4.2.0/go.mod h1:GD3m/WPPma+621UaU6KNjKEo5Hl09z86viKwQjTpV0Q= -github.com/Azure/azure-kusto-go v0.15.2 h1:OlABJilic9TythSgWW6i8Fd0SgNTg0t9jBu6WVsaixM= -github.com/Azure/azure-kusto-go v0.15.2/go.mod h1:9F2zvXH8B6eWzgI1S4k1ZXAIufnBZ1bv1cW1kB1n3D0= +github.com/Azure/azure-kusto-go v0.16.1 h1:vCBWcQghmC1qIErUUgVNWHxGhZVStu1U/hki6iBA14k= +github.com/Azure/azure-kusto-go v0.16.1/go.mod h1:9F2zvXH8B6eWzgI1S4k1ZXAIufnBZ1bv1cW1kB1n3D0= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 h1:GJHeeA2N7xrG3q30L2UXDyuWRzDM900/65j70wcM4Ww= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJcghJGOYCgdezslRSVzqwLf/q+4Y2r/0= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid v0.4.0 h1:d7S13DPk63SvBJfSUiMJJ26tRsvrBumkLPEfQEAarGk= github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid v0.4.0/go.mod h1:7e/gsXp4INB4k/vg0h3UOkYpDK6oZqctxr+L05FGybg= github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.2.1 h1:0f6XnzroY1yCQQwxGf/n/2xlaBF02Qhof2as99dGNsY= github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.2.1/go.mod h1:vMGz6NOUGJ9h5ONl2kkyaqq5E0g7s4CHNSrXN5fl8UY= -github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.0 h1:QISzMrspEvZj4zrrN2wlNwfum5RmnKQhQNiSujwH7oU= -github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.0/go.mod h1:xNjFERdhyMqZncbNJSPBsTCddk5kwsUVUzELQPMj/LA= +github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.1 
h1:o/Ws6bEqMeKZUfj1RRm3mQ51O8JGU5w+Qdg2AhHib6A= +github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.1/go.mod h1:6QAMYBAbQeeKX+REFJMZ1nFWu9XLw/PPcjYpuc9RDFs= github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery v1.1.0 h1:l+LIDHsZkFBiipIKhOn3m5/2MX4bwNwHYWyNulPaTis= github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery v1.1.0/go.mod h1:BjVVBLUiZ/qR2a4PAhjs8uGXNfStD0tSxgxCMfcVRT8= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub v1.2.0 h1:+dggnR89/BIIlRlQ6d19dkhhdd/mQUiQbXhyHUFiB4w= @@ -1360,30 +806,29 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFG github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0/go.mod h1:LRr2FzBTQlONPPa5HREE5+RjSCTXl7BwOvYOaWTqCaI= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1 h1:7CBQ+Ei8SP2c6ydQTGCCrS35bDxgTMfoP2miAwK++OU= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1/go.mod h1:c/wcGeGx5FUPbM/JltUYHZcKmigwyVLJlDq+4HdtXaw= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0/go.mod h1:T5RfihdXtBDxt1Ch2wobif3TvzTdumDy29kahv6AV9A= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0 h1:PiSrjRPpkQNjrM8H0WwKMnZUdu1RGMtd/LdGKUrOo+c= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0/go.mod h1:oDrbWx4ewMylP7xHivfgixbfGBT6APAwsSoHRKotnIc= github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets v1.1.0 h1:h4Zxgmi9oyZL2l8jeg1iRTqPloHktywWcu0nlJmo1tA= github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets v1.1.0/go.mod h1:LgLGXawqSreJz135Elog0ywTJDsm0Hz2k+N+6ZK35u8= github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 h1:D3occbWoio4EBLkbkevetNMAVX197GkzbUMtqjGWn80= github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0/go.mod h1:bTSOgj05NGRuHHhQwAdPnYr9TOdNmKlZTgGLL6nyAdI= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 h1:YUUxeiOWgdAQE3pXt2H7QXzZs0q8UBjgRbl56qo8GYM= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2/go.mod h1:dmXQgZuiSubAecswZE+Sm8jkvEa7kQgTPVRvwL/nd0E= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0 h1:Be6KInmFEKV81c0pOAEbRYehLMwmmGI1exuFj248AMk= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0/go.mod h1:WCPBHsOXfBVnivScjs2ypRfimjEW0qPVLGgJkZlrIOA= github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue v1.0.0 h1:lJwNFV+xYjHREUTHJKx/ZF6CJSt9znxmLw9DqSTvyRU= github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue v1.0.0/go.mod h1:GfT0aGew8Qj5yiQVqOO5v7N8fanbJGyUoHqXg56qcVY= github.com/Azure/go-amqp v1.0.5 h1:po5+ljlcNSU8xtapHTe8gIc8yHxCzC03E8afH2g1ftU= github.com/Azure/go-amqp v1.0.5/go.mod h1:vZAogwdrkbyK3Mla8m/CxSc/aKdnTZ4IbPxl51Y5WZE= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc= +github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA= github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs= github.com/Azure/go-autorest/autorest/adal 
v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 h1:wkAZRgT/pn8HhFyzfe9UnqOjJYqlembgCTi72Bm/xKk= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.12/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 h1:Ov8avRZi2vmrE2JcXw+tu5K/yB41r7xK9GZDiBF7NdM= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.13/go.mod h1:5BAVfWLWXihP47vYrPuBKKf4cS0bXI+KM9Qx6ETDJYo= github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 h1:w77/uPk80ZET2F+AfQExZyEWtn+0Rk/uw17m9fv5Ajc= github.com/Azure/go-autorest/autorest/azure/cli v0.4.6/go.mod h1:piCfgPho7BiIDdEQ1+g4VmKyD5y+p/XtSNqE6Hc4QD0= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= @@ -1405,8 +850,8 @@ github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/Huawei/gophercloud v1.0.21 h1:HhtzZzRGZiVmLypqHlXrGAcdC1TJW99FLewfPSVktpY= github.com/Huawei/gophercloud v1.0.21/go.mod h1:TUtAO2PE+Nj7/QdfUXbhi5Xu0uFKVccyukPA7UCxD9w= -github.com/IBM/sarama v1.43.1 h1:Z5uz65Px7f4DhI/jQqEm/tV9t8aU+JUdTyW/K/fCXpA= -github.com/IBM/sarama v1.43.1/go.mod h1:GG5q1RURtDNPz8xxJs3mgX6Ytak8Z9eLhAkJPObe2xE= +github.com/IBM/sarama v1.43.2 h1:HABeEqRUh32z8yzY2hGB/j8mHSzC/HA9zlEjqFNCzSw= +github.com/IBM/sarama v1.43.2/go.mod h1:Kyo4WkF24Z+1nz7xeVUFWIuKVV8RS3wM8mkvPKMdXFQ= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= @@ -1419,27 +864,18 @@ github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM= -github.com/alecthomas/assert/v2 v2.2.2/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ= -github.com/alecthomas/assert/v2 v2.3.0/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ= github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= -github.com/alecthomas/participle/v2 v2.0.0/go.mod h1:rAKZdJldHu8084ojcWevWAL8KmEU+AT+Olodb+WoN2Y= -github.com/alecthomas/participle/v2 v2.1.0/go.mod h1:Y1+hAs8DHPmc3YUFzqllV+eSQ9ljPTk0ZkPMtEdAx2c= -github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= -github.com/andybalholm/brotli v1.0.5/go.mod 
h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M= github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= -github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230512164433-5d1fd1a340c9 h1:goHVqTbFX3AIo0tzGr14pgfAW2ZfPChKO21Z9MGf/gk= +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230512164433-5d1fd1a340c9/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI= github.com/apache/arrow/go/v12 v12.0.0/go.mod h1:d+tV/eHZZ7Dz7RPrFKtPK02tpr+c9/PEd/zm8mDS9Vg= -github.com/apache/arrow/go/v12 v12.0.1/go.mod h1:weuTY7JvTG/HDPtMQxEUp7pU73vkLWMLpY67QwZ/WWw= -github.com/apache/arrow/go/v14 v14.0.2/go.mod h1:u3fgh3EdgN/YQ8cVQRguVW3R+seMybFg8QBQ5LU+eBY= github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= -github.com/apache/thrift v0.17.0/go.mod h1:OLxhMRJxomX+1I/KUw03qoV3mMz16BwaKI+d4fPBx7Q= github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= github.com/arangodb/go-driver v1.6.2 h1:3o4inejwR7VMmsKvQJ6hepx4au9sUT6C/RDrXykuD1g= @@ -1456,61 +892,61 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W github.com/aws/aws-msk-iam-sasl-signer-go v1.0.0 h1:UyjtGmO0Uwl/K+zpzPwLoXzMhcN9xmnR2nrqJoBrg3c= github.com/aws/aws-msk-iam-sasl-signer-go v1.0.0/go.mod h1:TJAXuFs2HcMib3sN5L0gUC+Q01Qvy3DemvA55WuC+iA= github.com/aws/aws-sdk-go-v2 v1.16.12/go.mod h1:C+Ym0ag2LIghJbXhfXZ0YEEp49rBWowxKzJLUoob0ts= -github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA= -github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 h1:x6xsQXGSmW6frevwDA+vi/wqhp1ct18mVXYN08/93to= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg= +github.com/aws/aws-sdk-go-v2 v1.30.3 h1:jUeBtG0Ih+ZIFH0F4UkmL9w3cSpaMv9tYYDbzILP8dY= +github.com/aws/aws-sdk-go-v2 v1.30.3/go.mod h1:nIQjQVp5sfpQcTc9mPSr1B0PaWK5ByX9MOoDadSN4lc= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 h1:tW1/Rkad38LA15X4UQtjXZXNKsCgkshC3EbmcUmghTg= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3/go.mod h1:UbnqO+zjqk3uIt9yCACHJ9IVNhyhOCnYk8yA19SAWrM= github.com/aws/aws-sdk-go-v2/config v1.17.2/go.mod h1:jumS/AMwul4WaG8vyXsF6kUndG9zndR+yfYBwl4i9ds= -github.com/aws/aws-sdk-go-v2/config v1.27.11 h1:f47rANd2LQEYHda2ddSCKYId18/8BhSRM4BULGmfgNA= -github.com/aws/aws-sdk-go-v2/config v1.27.11/go.mod h1:SMsV78RIOYdve1vf36z8LmnszlRWkwMQtomCAI0/mIE= +github.com/aws/aws-sdk-go-v2/config v1.27.27 h1:HdqgGt1OAP0HkEDDShEl0oSYa9ZZBSOmKpdpsDMdO90= +github.com/aws/aws-sdk-go-v2/config v1.27.27/go.mod h1:MVYamCg76dFNINkZFu4n4RjDixhVr51HLj4ErWzrVwg= github.com/aws/aws-sdk-go-v2/credentials v1.12.15/go.mod 
h1:41zTC6U/78fUD7ZCa5NymTJANDjfqySg5YEAYVFl2Ic= -github.com/aws/aws-sdk-go-v2/credentials v1.17.11 h1:YuIB1dJNf1Re822rriUOTxopaHHvIq0l/pX3fwO+Tzs= -github.com/aws/aws-sdk-go-v2/credentials v1.17.11/go.mod h1:AQtFPsDH9bI2O+71anW6EKL+NcD7LG3dpKGMV4SShgo= +github.com/aws/aws-sdk-go-v2/credentials v1.17.27 h1:2raNba6gr2IfA0eqqiP2XiQ0UVOpGPgDSi0I9iAP+UI= +github.com/aws/aws-sdk-go-v2/credentials v1.17.27/go.mod h1:gniiwbGahQByxan6YjQUMcW4Aov6bLC3m+evgcoN4r4= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.13/go.mod h1:y0eXmsNBFIVjUE8ZBjES8myOHlMsXDz7qGT93+MVdjk= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 h1:FVJ0r5XTHSmIHJV6KuDmdYhEpvlHpiSd38RQWhut5J4= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1/go.mod h1:zusuAeqezXzAB24LGuzuekqMAEgWkVYukBec3kr3jUg= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 h1:KreluoV8FZDEtI6Co2xuNk/UqI9iwMrOx/87PBNIKqw= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11/go.mod h1:SeSUYBLsMYFoRvHE0Tjvn7kbxaUhl75CJi1sbfhMxkU= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.19/go.mod h1:llxE6bwUZhuCas0K7qGiu5OgMis3N7kdWtFSxoHmJ7E= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 h1:aw39xVGeRWlWx9EzGVnhOR4yOjQDHPQ6o6NmBlscyQg= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5/go.mod h1:FSaRudD0dXiMPK2UjknVwwTYyZMRsHv3TtkabsZih5I= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 h1:SoNJ4RlFEQEbtDcCEt+QG56MY4fm4W8rYirAmq+/DdU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15/go.mod h1:U9ke74k1n2bf+RIgoX1SXFed1HLs51OgUSs+Ph0KJP8= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.13/go.mod h1:lB12mkZqCSo5PsdBFLNqc2M/OOYgNAy8UtaktyuWvE8= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 h1:PG1F3OD1szkuQPzDw3CIQsRIrtTlUC3lP84taWzHlq0= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5/go.mod h1:jU1li6RFryMz+so64PpKtudI+QzbKoIEivqdf6LNpOc= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 h1:C6WHdGnTDIYETAm5iErQUiVNsclNx9qbJVPIt03B6bI= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15/go.mod h1:ZQLZqhcu+JhSrA9/NXRm8SkDvsycE+JkV3WGY41e+IM= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.20/go.mod h1:bfTcsThj5a9P5pIGRy0QudJ8k4+issxXX+O6Djnd5Cs= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= -github.com/aws/aws-sdk-go-v2/service/amp v1.25.4 h1:TgkApdPnCVX7RtHMcsswmUYsDnnj1LLIh0KLD9YL/n4= -github.com/aws/aws-sdk-go-v2/service/amp v1.25.4/go.mod h1:i5BA2ACkXa8Pzqinz/xEukdVJnMdfQLRcx7ftb5g0pk= -github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.38.0 h1:vAfGwYFCcPDS9Bg7ckfMBer6olJLOHsOAVoKWpPIirs= -github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.38.0/go.mod h1:U12sr6Lt14X96f16t+rR52+2BdqtydwN7DjEEHRMjO0= -github.com/aws/aws-sdk-go-v2/service/dynamodb v1.31.1 h1:dZXY07Dm59TxAjJcUfNMJHLDI/gLMxTRZefn2jFAVsw= -github.com/aws/aws-sdk-go-v2/service/dynamodb v1.31.1/go.mod h1:lVLqEtX+ezgtfalyJs7Peb0uv9dEpAQP5yuq2O26R44= -github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.20.4 h1:hSwDD19/e01z3pfyx+hDeX5T/0Sn+ZEnnTO5pVWKWx8= -github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.20.4/go.mod h1:61CuGwE7jYn0g2gl7K3qoT4vCY59ZQEixkPu8PN5IrE= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg= 
-github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.6 h1:6tayEze2Y+hiL3kdnEUxSPsP+pJsUfwLSFspFl1ru9Q= -github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.6/go.mod h1:qVNb/9IOVsLCZh0x2lnagrBwQ9fxajUpXS7OZfIsKn0= +github.com/aws/aws-sdk-go-v2/service/amp v1.27.3 h1:o1cMErMp45oKZ2ScvBOdVXYhvu6FdUcz0Xn+JpDd408= +github.com/aws/aws-sdk-go-v2/service/amp v1.27.3/go.mod h1:TuSBSV1IedYHHrC4A3bW84WjQXNSzc6XasgvuDRDb4E= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.40.3 h1:VminN0bFfPQkaJ2MZOJh0d7+sVu0SKdZnO9FfyE1C18= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.40.3/go.mod h1:SxcxnimuI5pVps173h7VcyuFadgOFFfl2aUXUCswoY0= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.34.4 h1:utG3S4T+X7nONPIpRoi1tVcQdAdJxntiVS2yolPJyXc= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.34.4/go.mod h1:q9vzW3Xr1KEXa8n4waHiFt1PrppNDlMymlYP+xpsFbY= +github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.22.3 h1:r27/FnxLPixKBRIlslsvhqscBuMK8uysCYG9Kfgm098= +github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.22.3/go.mod h1:jqOFyN+QSWSoQC+ppyc4weiO8iNQXbzRbxDjQ1ayYd4= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 h1:dT3MqvGhSoaIhRseqw2I0yH81l7wiR2vjs57O51EAm8= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3/go.mod h1:GlAeCkHwugxdHaueRr4nhPuY+WW+gR8UjlcqzPr1SPI= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.16 h1:lhAX5f7KpgwyieXjbDnRTjPEUI0l3emSRyxXj1PXP8w= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.16/go.mod h1:AblAlCwvi7Q/SFowvckgN+8M3uFPlopSYeLlbNDArhA= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.13/go.mod h1:V390DK4MQxLpDdXxFqizyz8KUxuWImkW/xzgXMz0yyk= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 h1:ogRAwT1/gxJBcSWDMZlgyFUM962F51A5CRhDLbxLdmo= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7/go.mod h1:YCsIZhXfRPLFFCl5xxY+1T9RKzOKjCut+28JSX2DnAk= -github.com/aws/aws-sdk-go-v2/service/kinesis v1.27.4 h1:Oe8awBiS/iitcsRJB5+DHa3iCxoA0KwJJf0JNrYMINY= -github.com/aws/aws-sdk-go-v2/service/kinesis v1.27.4/go.mod h1:RCZCSFbieSgNG1RKegO26opXV4EXyef/vNBVJsUyHuw= -github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.28.6 h1:TIOEjw0i2yyhmhRry3Oeu9YtiiHWISZ6j/irS1W3gX4= -github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.28.6/go.mod h1:3Ba++UwWd154xtP4FRX5pUK3Gt4up5sDHCve6kVfE+g= -github.com/aws/aws-sdk-go-v2/service/sqs v1.31.4 h1:mE2ysZMEeQ3ulHWs4mmc4fZEhOfeY1o6QXAfDqjbSgw= -github.com/aws/aws-sdk-go-v2/service/sqs v1.31.4/go.mod h1:lCN2yKnj+Sp9F6UzpoPPTir+tSaC9Jwf6LcmTqnXFZw= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 h1:HGErhhrxZlQ044RiM+WdoZxp0p+EGM62y3L6pwA4olE= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17/go.mod h1:RkZEx4l0EHYDJpWppMJ3nD9wZJAa8/0lq9aVC+r2UII= +github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.3 h1:ktR7RUdUQ8m9rkgCPRsS7iTJgFp9MXEX0nltrT8bxY4= +github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.3/go.mod h1:hufTMUGSlcBLGgs6leSPbDfY1sM3mrO2qjtVkPMTDhE= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.4 h1:NgRFYyFpiMD62y4VPXh4DosPFbZd4vdMVBWKk0VmWXc= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.4/go.mod h1:TKKN7IQoM7uTnyuFm9bm9cw5P//ZYTl4m3htBWQ1G/c= +github.com/aws/aws-sdk-go-v2/service/sqs v1.34.3 h1:Vjqy5BZCOIsn4Pj8xzyqgGmsSqzz7y/WXbN3RgOoVrc= +github.com/aws/aws-sdk-go-v2/service/sqs v1.34.3/go.mod h1:L0enV3GCRd5iG9B64W35C4/hwsCB00Ib+DKVGTadKHI= github.com/aws/aws-sdk-go-v2/service/sso v1.11.18/go.mod 
h1:ytmEi5+qwcSNcV2pVA8PIb1DnKT/0Bu/K4nfJHwoM6c= -github.com/aws/aws-sdk-go-v2/service/sso v1.20.5 h1:vN8hEbpRnL7+Hopy9dzmRle1xmDc7o8tmY0klsr175w= -github.com/aws/aws-sdk-go-v2/service/sso v1.20.5/go.mod h1:qGzynb/msuZIE8I75DVRCUXw3o3ZyBmUvMwQ2t/BrGM= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.4 h1:BXx0ZIxvrJdSgSvKTZ+yRBeSqqgPM89VPlulEcl37tM= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.4/go.mod h1:ooyCOXjvJEsUw7x+ZDHeISPMhtwI3ZCB7ggFMcFfWLU= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.1/go.mod h1:NY+G+8PW0ISyJ7/6t5mgOe6qpJiwZa9Jix05WPscJjg= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 h1:Jux+gDDyi1Lruk+KHF91tK2KCuY61kzoCpvtvJJBtOE= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4/go.mod h1:mUYPBhaF2lGiukDEjJX2BLRRKTmoUSitGDUgM4tRxak= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 h1:yiwVzJW2ZxZTurVbYWA7QOrAaCYQR72t0wrSBfoesUE= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4/go.mod h1:0oxfLkpz3rQ/CHlx5hB7H69YUpFiI1tql6Q6Ne+1bCw= github.com/aws/aws-sdk-go-v2/service/sts v1.16.14/go.mod h1:Y+BUV19q3OmQVqNUlbZ40zVi3NM6Biuxwkx/qdSD/CY= -github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 h1:cwIxeBttqPN3qkaAjcEcsh8NYr8n2HZPkcKgPAi1phU= -github.com/aws/aws-sdk-go-v2/service/sts v1.28.6/go.mod h1:FZf1/nKNEkHdGGJP/cI2MoIMquumuRK6ol3QQJNDxmw= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 h1:ZsDKRLXGWHk8WdtyYMoGNO7bTudrvuKpDKgMVRlepGE= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.3/go.mod h1:zwySh8fpFyXp9yOr/KVzxOl8SRqgf/IDw5aUt9UKFcQ= github.com/aws/smithy-go v1.13.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q= -github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= +github.com/aws/smithy-go v1.20.3 h1:ryHwveWzPV5BIof6fyDvor6V3iUL7nTfiTKXHiW05nE= +github.com/aws/smithy-go v1.20.3/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= @@ -1525,8 +961,8 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4Yn github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= -github.com/bradleyfalzon/ghinstallation/v2 v2.10.0 h1:XWuWBRFEpqVrHepQob9yPS3Xg4K3Wr9QCx4fu8HbUNg= -github.com/bradleyfalzon/ghinstallation/v2 v2.10.0/go.mod h1:qoGA4DxWPaYTgVCrmEspVSjlTu4WYAiSxMIhorMRXXc= +github.com/bradleyfalzon/ghinstallation/v2 v2.11.0 h1:R9d0v+iobRHSaE4wKUnXFiZp53AL4ED5MzgEMwGTZag= +github.com/bradleyfalzon/ghinstallation/v2 v2.11.0/go.mod h1:0LWKQwOHewXO/1acI6TtyE0Xc4ObDb2rFN7eHBAG71M= github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= @@ -1545,9 +981,7 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= github.com/chzyer/readline 
v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/readline v1.5.0/go.mod h1:x22KAscuvRqlLoK9CsoYsmxoXZMMFVyOl86cAH8qUic= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cloudevents/sdk-go/v2 v2.15.2 h1:54+I5xQEnI73RBhWHxbI1XJcqOFOVJN85vb41+8mHUc= github.com/cloudevents/sdk-go/v2 v2.15.2/go.mod h1:lL7kSWAE/V8VI4Wh0jbL2v/jvqsm6tjmaQBSvxcv4uE= github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= @@ -1561,10 +995,8 @@ github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230428030218-4003588d1b74/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= +github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -1594,8 +1026,8 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/dysnix/predictkube-libs v0.0.4-0.20230109175007-5a82fccd31c7 h1:UwnI1LSssPFHWaBtTCV5gOgz0skHYG7DdbRH6BTTp5o= github.com/dysnix/predictkube-libs v0.0.4-0.20230109175007-5a82fccd31c7/go.mod h1:BQ41gAkQrowPCIk3e30mKovJQ8sXUESgiJ5IPW+19E8= -github.com/dysnix/predictkube-proto v0.0.0-20220713123213-7135dce1e9c9 h1:6/RxCmoKBe0afV8gPYzXKIEhDcF7kzIGkmnhDr19e6w= -github.com/dysnix/predictkube-proto v0.0.0-20220713123213-7135dce1e9c9/go.mod h1:zTsQdEyzxs3OHHtrjf8WpmexujIMTYyCVz/38VCt0uo= +github.com/dysnix/predictkube-proto v0.0.0-20240703070754-1483183ce065 h1:YDYrOoOzJt9E9MrUbJrAdzHmY1JTWQWH63QlWNh5Bqo= +github.com/dysnix/predictkube-proto v0.0.0-20240703070754-1483183ce065/go.mod h1:4AJBqCAMzHVSSpsdUCCHVprjShAIuZDAbZyG6DQgoqY= github.com/eapache/go-resiliency v1.6.0 h1:CqGDTLtpwuWKn6Nj3uNUdflaq+/kIPsg0gfNzHton30= github.com/eapache/go-resiliency v1.6.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= @@ -1611,27 +1043,20 @@ github.com/emicklei/go-restful/v3 v3.12.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRr github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= 
github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= -github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= -github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= -github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= -github.com/envoyproxy/protoc-gen-validate v1.0.1/go.mod h1:0vj8bNkYbSTNS2PIyH87KZaeN4x9zpL9Qt8fQC7d+vs= github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= github.com/evanphx/json-patch v5.8.1+incompatible h1:2toJaoe7/rNa1zpeQx0UnVEjqk6z2ecyA20V/zg8vTU= github.com/evanphx/json-patch v5.8.1+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= -github.com/expr-lang/expr v1.16.5 h1:m2hvtguFeVaVNTHj8L7BoAyt7O0PAIBaSVbjdHgRXMs= -github.com/expr-lang/expr v1.16.5/go.mod h1:uCkhfG+x7fcZ5A5sXHKuQ07jGZRl6J0FCAaf2k4PtVQ= +github.com/expr-lang/expr v1.16.9 h1:WUAzmR0JNI9JCiF0/ewwHB1gmcGw5wW7nWt8gc6PpCI= +github.com/expr-lang/expr v1.16.9/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/favadi/protoc-go-inject-tag v1.3.0/go.mod h1:SSkUBgfqw2IJ2p7NPNKWk0Idwxt/qIt2LQgFPUgRGtc= @@ -1676,10 +1101,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 
h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= @@ -1692,23 +1115,19 @@ github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+Gr github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= -github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= -github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= -github.com/go-playground/validator/v10 v10.19.0 h1:ol+5Fu+cSq9JD7SoSqe04GMI92cbn0+wvQ3bZ8b/AU4= -github.com/go-playground/validator/v10 v10.19.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= +github.com/go-playground/validator/v10 v10.22.0 h1:k6HsTZ0sTnROkhS//R0O+55JgM8C4Bx7ia+JlgcnOao= +github.com/go-playground/validator/v10 v10.22.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/gobuffalo/flect v1.0.2 h1:eqjPGSo2WmjgY2XlpGwo2NXgL3RucAKo4k4qQMNA5sA= @@ -1716,9 +1135,6 @@ github.com/gobuffalo/flect v1.0.2/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnD github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= -github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= -github.com/goccy/go-yaml v1.9.8/go.mod h1:JubOolP3gh0HpiBc4BLRD4YmjEjHAmIIB2aaXKkTfoE= -github.com/goccy/go-yaml v1.11.0/go.mod h1:H+mJrWtjPTJAHvRbV09MCK9xYwODM+wRTVFFTWckfng= github.com/gocql/gocql v1.6.0 
h1:IdFdOTbnpbd0pDhl4REKQDM+Q0SzKXQ1Yh+YZZ8T/qU= github.com/gocql/gocql v1.6.0/go.mod h1:3gM2c4D3AnkISwBxGnMMsS8Oy4y2lhbPRsH4xnJrHG8= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -1744,7 +1160,7 @@ github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGw github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= -github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -1790,7 +1206,6 @@ github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9 github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto= github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= -github.com/google/flatbuffers v23.5.26+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -1812,10 +1227,8 @@ github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github/v50 v50.2.0 h1:j2FyongEHlO9nxXLc+LP3wuBSVU9mVxfpdYUexMpIfk= github.com/google/go-github/v50 v50.2.0/go.mod h1:VBY8FB6yPIjrtKhozXv4FQupxKLS6H4m6xFZlT43q8Q= -github.com/google/go-github/v60 v60.0.0 h1:oLG98PsLauFvvu4D/YPxq374jhSxFYdzQGNCyONLfn8= -github.com/google/go-github/v60 v60.0.0/go.mod h1:ByhX2dP9XT9o/ll2yXAu2VD8l5eNVg8hD4Cr0S/LmQk= -github.com/google/go-pkcs11 v0.2.0/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY= -github.com/google/go-pkcs11 v0.2.1-0.20230907215043-c6f79328ddf9/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY= +github.com/google/go-github/v62 v62.0.0 h1:/6mGCaRywZz9MuHyw9gD1CwsbmBX8GWsbFkwMmHdhl4= +github.com/google/go-github/v62 v62.0.0/go.mod h1:EMxeUqGJq2xRu9DYBMwel/mr7kZrzUOfQmmpYrZn2a4= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -1845,23 +1258,19 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod 
h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= -github.com/google/pprof v0.0.0-20231229205709-960ae82b1e42 h1:dHLYa5D8/Ta0aLR2XcPsrkpAgGeFs6thhMcQK0oQ0n8= -github.com/google/pprof v0.0.0-20231229205709-960ae82b1e42/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM= +github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.0/go.mod h1:OJpEgntRZo8ugHpF9hkoLJbS5dSI20XZeXJ9JVywLlM= github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= -github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= -github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= +github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= @@ -1869,8 +1278,6 @@ github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= -github.com/googleapis/enterprise-certificate-proxy v0.2.4/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= -github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -1887,13 +1294,12 @@ github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38 github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= github.com/googleapis/gax-go/v2 
v2.10.0/go.mod h1:4UOEnMCrxsSqQ940WnTiD6qJ63le2ev3xfyagutxiPw= github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= -github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= -github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg= -github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI= +github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= +github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gophercloud/gophercloud v1.11.0 h1:ls0O747DIq1D8SUHc7r2vI8BFbMLeLFuENaAIfEx7OM= -github.com/gophercloud/gophercloud v1.11.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/gophercloud/gophercloud v1.14.0 h1:Bt9zQDhPrbd4qX7EILGmy+i7GP35cc+AAL2+wIJpUE8= +github.com/gophercloud/gophercloud v1.14.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gopherjs/gopherjs v0.0.0-20180825215210-0210a2f0f73c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20210822113901-9ebd50d28389 h1:/XVr7qjOhyoE60WCbVN0HulV+3beN5Jo/YsZJsUE+mw= @@ -1906,10 +1312,10 @@ github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZH github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= -github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0 h1:f4tggROQKKcnh4eItay6z/HbHLqghBxS8g7pyMhmDio= -github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0/go.mod h1:hKAkSgNkL0FII46ZkJcpVEAai4KV+swlIWCKfekd1pA= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.3 h1:o95KDiV/b1xdkumY5YbLR0/n2+wBxUpgf3HgfKgTyLI= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.3/go.mod h1:hTxjzRcX49ogbTGVJ1sM5mz5s+SSgiGIyL3jjPxl32E= +github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1 h1:qnpSQwGEnkcRpTqNOIR6bJbR0gAorgP9CSALpRcKoAA= +github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1/go.mod h1:lXGCsh6c22WGtjr+qGHj1otzZpV/1kwTMAqkwZsnWRU= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 h1:pRhl55Yx1eC7BZ1N+BBWwnKaMyD8uC+34TLdndZMAKk= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0/go.mod h1:XKMd7iuf/RGPSMJ/U4HP0zS2Z9Fh8Ps9a+6X26m/tmI= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= @@ -1918,8 +1324,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFb github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod 
h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= @@ -1964,15 +1370,13 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/vault/api v1.13.0 h1:RTCGpE2Rgkn9jyPcFlc7YmNocomda44k5ck8FKMH41Y= -github.com/hashicorp/vault/api v1.13.0/go.mod h1:0cb/uZUv1w2cVu9DIvuW1SMlXXC6qtATJt+LXJRx+kg= -github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= +github.com/hashicorp/vault/api v1.14.0 h1:Ah3CFLixD5jmjusOgm8grfN9M0d+Y8fVR2SW0K6pJLU= +github.com/hashicorp/vault/api v1.14.0/go.mod h1:pV9YLxBGSz+cItFDd8Ii4G17waWOQ32zVjMWHe/cOqk= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -1986,8 +1390,8 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 h1:L0QtFUgDarD7Fpv9jeVMgy/+Ec0mtnmYuImjTz6dtDA= github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.5.5 h1:amBjrZVmksIdNjxGW/IiIMzxMKZFelXbUoPNb+8sjQw= -github.com/jackc/pgx/v5 v5.5.5/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A= +github.com/jackc/pgx/v5 v5.6.0 h1:SWJzexBzPL5jb0GEsrPMLIsi/3jOo7RHlzTjcAeDrPY= +github.com/jackc/pgx/v5 v5.6.0/go.mod h1:DNZ/vlrUnhWCoFGxHAG8U2ljioxukquj7utPDgtQdTw= github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= github.com/jackc/puddle/v2 v2.2.1/go.mod 
h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= @@ -2036,12 +1440,9 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= -github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= -github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= +github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= -github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -2060,7 +1461,6 @@ github.com/labstack/echo/v4 v4.6.1/go.mod h1:RnjgMWNDB9g/HucVWhQYNQP9PvbYf6adqft github.com/labstack/echo/v4 v4.9.1/go.mod h1:Pop5HLc+xoc4qhTZ1ip6C0RtP7Z+4VzRLWZZFKqbbjo= github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= github.com/labstack/gommon v0.4.0/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM= -github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= @@ -2073,7 +1473,6 @@ github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= @@ -2084,7 +1483,6 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= 
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= @@ -2092,9 +1490,8 @@ github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZ github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= -github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/microsoft/ApplicationInsights-Go v0.4.4 h1:G4+H9WNs6ygSCe6sUyxRc2U81TI5Es90b2t/MwX5KqY= github.com/microsoft/ApplicationInsights-Go v0.4.4/go.mod h1:fKRUseBqkw6bDiXTs3ESTiU/4YTIHsQS4W3fP2ieF4U= github.com/microsoft/azure-devops-go-api/azuredevops v1.0.0-b5 h1:YH424zrwLTlyHSH/GzLMJeu5zhYVZSx5RQxGKm1h96s= @@ -2148,11 +1545,11 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= -github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= +github.com/onsi/ginkgo/v2 v2.19.1 h1:QXgq3Z8Crl5EL1WBAC98A5sEBHARrAJNzAmMxzLcRF0= +github.com/onsi/ginkgo/v2 v2.19.1/go.mod h1:O3DtEWQkPa/F7fBMgmZQKKsluAy8pd3rEQdrjkPb9zA= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.33.0 h1:snPCflnZrpMsy94p4lXVEkHo12lmPnc3vY5XBbreexE= -github.com/onsi/gomega v1.33.0/go.mod h1:+925n5YtiFsLzzafLUHzVMBpvvRAzrydIBiSIxjX3wY= +github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= +github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= github.com/open-policy-agent/frameworks/constraint v0.0.0-20240411024313-c2efb00269a8 h1:+3lwaywVgMn4XfcYASBJs2V19XjsKlsRmUEne+Zn8eY= github.com/open-policy-agent/frameworks/constraint v0.0.0-20240411024313-c2efb00269a8/go.mod h1:6olMPE+rOIu3A1fNk9FaMAe18fTlJbElZUDz+Oi+MkU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= @@ -2170,7 +1567,6 @@ github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2 github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= 
github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= @@ -2188,22 +1584,22 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= -github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= -github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= -github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= -github.com/prometheus/procfs v0.14.0 h1:Lw4VdGGoKEZilJsayHf0B+9YgLGREba2C6xr+Fdfq6s= -github.com/prometheus/procfs v0.14.0/go.mod h1:XL+Iwz8k8ZabyZfMFHPiilCniixqQarAy5Mu67pHlNQ= -github.com/rabbitmq/amqp091-go v1.9.0 h1:qrQtyzB4H8BQgEuJwhmVQqVHB9O4+MNDJCCAcpc3Aoo= -github.com/rabbitmq/amqp091-go v1.9.0/go.mod h1:+jPrT9iY2eLjRaMSRHUhc3z14E/l85kv/f+6luSD3pc= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= +github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= +github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/rabbitmq/amqp091-go v1.10.0 h1:STpn5XsHlHGcecLmMFCtg7mqq0RnD+zFr4uzukfVhBw= +github.com/rabbitmq/amqp091-go v1.10.0/go.mod h1:Hy4jKW5kQART1u+JkDTF9YYOQUHXqMuhrgxOEeS7G4o= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/redis/go-redis/v9 v9.5.1 h1:H1X4D3yHPaYrkL5X06Wh6xNVM/pX0Ft4RV0vMGvLBh8= -github.com/redis/go-redis/v9 v9.5.1/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M= +github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4= +github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rivo/uniseg 
v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= @@ -2287,15 +1683,13 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/substrait-io/substrait-go v0.4.2/go.mod h1:qhpnLmrcvAnlZsUyPXZRqldiHapPTXC3t7xFgDi3aQg= github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= -github.com/tidwall/gjson v1.17.1 h1:wlYEnwqAHgzmhNUFfw7Xalt2JzQvsMx2Se4PcoFCT/U= -github.com/tidwall/gjson v1.17.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.17.3 h1:bwWLZU7icoKRG+C+0PNwIKC6FCJO/Q3p2pZvuP0jN94= +github.com/tidwall/gjson v1.17.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= @@ -2334,8 +1728,8 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= -github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a h1:fZHgsYlfvtyqToslyjUt3VOPF4J7aK/3MPcK7xp3PDk= -github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a/go.mod h1:ul22v+Nro/R083muKhosV54bj5niojjWZvU8xrevuH4= +github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM= +github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -2347,26 +1741,25 @@ github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= gitlab.com/flimzy/testy v0.11.0 h1:YQxcTSqX/r9NYvwrNG+maswc7bw+r1Hh1RE/7e6JWQ4= gitlab.com/flimzy/testy v0.11.0/go.mod h1:tcu652e6AyD5wS8q2JRUI+j5SlwIYsl3yq3ulHyuh8M= -go.einride.tech/aip v0.66.0/go.mod h1:qAhMsfT7plxBX+Oy7Huol6YUvZ0ZzdUz26yZsQwfl1M= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA= go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= -go.etcd.io/etcd/api/v3 v3.5.13 h1:8WXU2/NBge6AUF1K1gOexB6e07NgsN1hXK0rSTtgSp4= 
-go.etcd.io/etcd/api/v3 v3.5.13/go.mod h1:gBqlqkcMMZMVTMm4NDZloEVJzxQOQIls8splbqBDa0c= -go.etcd.io/etcd/client/pkg/v3 v3.5.13 h1:RVZSAnWWWiI5IrYAXjQorajncORbS0zI48LQlE2kQWg= -go.etcd.io/etcd/client/pkg/v3 v3.5.13/go.mod h1:XxHT4u1qU12E2+po+UVPrEeL94Um6zL58ppuJWXSAB8= +go.etcd.io/etcd/api/v3 v3.5.15 h1:3KpLJir1ZEBrYuV2v+Twaa/e2MdDCEZ/70H+lzEiwsk= +go.etcd.io/etcd/api/v3 v3.5.15/go.mod h1:N9EhGzXq58WuMllgH9ZvnEr7SI9pS0k0+DHZezGp7jM= +go.etcd.io/etcd/client/pkg/v3 v3.5.15 h1:fo0HpWz/KlHGMCC+YejpiCmyWDEuIpnTDzpJLB5fWlA= +go.etcd.io/etcd/client/pkg/v3 v3.5.15/go.mod h1:mXDI4NAOwEiszrHCb0aqfAYNCrZP4e9hRca3d1YK8EU= go.etcd.io/etcd/client/v2 v2.305.10 h1:MrmRktzv/XF8CvtQt+P6wLUlURaNpSDJHFZhe//2QE4= go.etcd.io/etcd/client/v2 v2.305.10/go.mod h1:m3CKZi69HzilhVqtPDcjhSGp+kA1OmbNn0qamH80xjA= -go.etcd.io/etcd/client/v3 v3.5.13 h1:o0fHTNJLeO0MyVbc7I3fsCf6nrOqn5d+diSarKnB2js= -go.etcd.io/etcd/client/v3 v3.5.13/go.mod h1:cqiAeY8b5DEEcpxvgWKsbLIWNM/8Wy2xJSDMtioMcoI= +go.etcd.io/etcd/client/v3 v3.5.15 h1:23M0eY4Fd/inNv1ZfU3AxrbbOdW79r9V9Rl62Nm6ip4= +go.etcd.io/etcd/client/v3 v3.5.15/go.mod h1:CLSJxrYjvLtHsrPKsy7LmZEE+DK2ktfd2bN4RhBMwlU= go.etcd.io/etcd/pkg/v3 v3.5.10 h1:WPR8K0e9kWl1gAhB5A7gEa5ZBTNkT9NdNWrR8Qpo1CM= go.etcd.io/etcd/pkg/v3 v3.5.10/go.mod h1:TKTuCKKcF1zxmfKWDkfz5qqYaE3JncKKZPFf8c1nFUs= go.etcd.io/etcd/raft/v3 v3.5.10 h1:cgNAYe7xrsrn/5kXMSaH8kM/Ky8mAdMqGOxyYwpP0LA= go.etcd.io/etcd/raft/v3 v3.5.10/go.mod h1:odD6kr8XQXTy9oQnyMPBOr0TVe+gT0neQhElQ6jbGRc= go.etcd.io/etcd/server/v3 v3.5.10 h1:4NOGyOwD5sUZ22PiWYKmfxqoeh72z6EhYjNosKGLmZg= go.etcd.io/etcd/server/v3 v3.5.10/go.mod h1:gBplPHfs6YI0L+RpGkTQO7buDbHv5HJGG/Bst0/zIPo= -go.mongodb.org/mongo-driver v1.15.0 h1:rJCKC8eEliewXjZGf0ddURtl7tTVy1TK3bfl0gkUSLc= -go.mongodb.org/mongo-driver v1.15.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= +go.mongodb.org/mongo-driver v1.16.0 h1:tpRsfBJMROVHKpdGyc1BBEzzjDUWjItxbVSZ8Ls4BQ4= +go.mongodb.org/mongo-driver v1.16.0/go.mod h1:oB6AhJQvFQL4LEHyXi6aJzQJtBiTQHiAd83l0GdFaiw= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -2376,49 +1769,34 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp 
v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= -go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= -go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= -go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI= -go.opentelemetry.io/otel v1.26.0 h1:LQwgL5s/1W7YiiRwxf03QGnWLb2HW4pLiAhaA5cZXBs= -go.opentelemetry.io/otel v1.26.0/go.mod h1:UmLkJHUAidDval2EICqBMbnAd0/m2vmpf/dAM+fvFs4= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.24.0 h1:f2jriWfOdldanBwS9jNBdeOKAQN7b4ugAMaNu1/1k9g= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.24.0/go.mod h1:B+bcQI1yTY+N0vqMpoZbEN7+XU4tNM0DmUiOwebFJWI= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.26.0 h1:HGZWGmCVRCVyAs2GQaiHQPbDHo+ObFWeUEOd+zDnp64= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.26.0/go.mod h1:SaH+v38LSCHddyk7RGlU9uZyQoRrKao6IBnJw6Kbn+c= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0 h1:U2guen0GhqH8o/G2un8f/aG/y++OuW6MyCo6hT9prXk= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0/go.mod h1:yeGZANgEcpdx/WK0IvvRFC+2oLiMS2u4L/0Rj2M2Qr0= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.28.0 h1:aLmmtjRke7LPDQ3lvpFz+kNEH43faFhzW7v8BFIEydg= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.28.0/go.mod h1:TC1pyCt6G9Sjb4bQpShH+P5R53pO6ZuGnHuuln9xMeE= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 h1:H2JFgRcGiyHg7H7bwcwaQJYrNFqCqrbTQ8K4p1OvDu8= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0/go.mod h1:WfCWp1bGoYK8MeULtI15MmQVczfR+bFkk0DF3h06QmQ= -go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= -go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= -go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY= -go.opentelemetry.io/otel/metric v1.26.0 h1:7S39CLuY5Jgg9CrnA9HHiEjGMF/X2VHvoXGgSllRz30= -go.opentelemetry.io/otel/metric v1.26.0/go.mod h1:SY+rHOI4cEawI9a7N1A4nIg/nTQXe1ccCNWYOJUrpX4= -go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= -go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= -go.opentelemetry.io/otel/sdk v1.26.0 h1:Y7bumHf5tAiDlRYFmGqetNcLaVUZmh4iYfmGxtmz7F8= -go.opentelemetry.io/otel/sdk v1.26.0/go.mod h1:0p8MXpqLeJ0pzcszQQN4F0S5FVjBLgypeGSngLsmirs= -go.opentelemetry.io/otel/sdk/metric v1.26.0 h1:cWSks5tfriHPdWFnl+qpX3P681aAYqlZHcAyHw5aU9Y= -go.opentelemetry.io/otel/sdk/metric v1.26.0/go.mod h1:ClMFFknnThJCksebJwz7KIyEDHO+nTB6gK8obLy8RyE= -go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= -go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= -go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo= -go.opentelemetry.io/otel/trace v1.26.0 h1:1ieeAUb4y0TE26jUFrCIXKpTuVK7uJGN9/Z/2LP5sQA= 
-go.opentelemetry.io/otel/trace v1.26.0/go.mod h1:4iDxvGDQuUkHve82hJJ8UqrwswHYsZuWCBllGV2U2y0= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= +go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= +go.opentelemetry.io/otel/sdk/metric v1.28.0 h1:OkuaKgKrgAbYrrY0t92c+cC+2F6hsFNnCQArXCKlg08= +go.opentelemetry.io/otel/sdk/metric v1.28.0/go.mod h1:cWPjykihLAPvXKi4iZc1dpER3Jdq2Z0YLse3moQUCpg= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= -go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= -go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= +go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.starlark.net v0.0.0-20231121155337-90ade8b19d09 h1:hzy3LFnSN8kuQK8h9tHl4ndF6UruMj47OqwqsS+/Ai4= go.starlark.net v0.0.0-20231121155337-90ade8b19d09/go.mod h1:LcLNIzVOMp4oV+uusnpk+VU+SzXaJakUuBjoCSWH5dM= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -2428,7 +1806,6 @@ go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0 go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= @@ -2441,8 +1818,8 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= -golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2458,10 +1835,8 @@ golang.org/x/exp 
v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= -golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= -golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f h1:99ci1mjWVBWwJiEKYY6jWa4d2nTQVIEhZIptnrVb1XY= -golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f/go.mod h1:/lliqkxwWAhPjf5oSOIJup2XcqJaw8RGS6k3TGEc7GI= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -2500,18 +1875,18 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= +golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= +golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2523,7 +1898,6 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= @@ -2542,13 +1916,10 @@ golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= -golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= -golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= -golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= -golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= -golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= +golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= +golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2567,11 +1938,10 @@ golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2634,16 +2004,13 @@ golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys 
v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220406163625-3f8b81556e12/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2657,20 +2024,20 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= -golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod 
h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= +golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= +golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2743,17 +2110,15 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/tools v0.10.0/go.mod h1:UJwyiVBsOA2uwvK/e5OY3GTpDUJriEd+/YlqAwLPmyM= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= -golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= -golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY= -golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= +golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2770,7 +2135,6 @@ gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJ gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= -gonum.org/v1/gonum v0.12.0/go.mod h1:73TDxJfAAHeA8Mk9mf8NlIppyhQNo5GLTcYeqgo2lvY= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= @@ -2837,23 +2201,14 @@ google.golang.org/api v0.122.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZ google.golang.org/api v0.124.0/go.mod h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2BlP4= google.golang.org/api v0.125.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= 
google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= -google.golang.org/api v0.128.0/go.mod h1:Y611qgqaE92On/7g65MQgxYul3c0rEB894kniWLY750= -google.golang.org/api v0.139.0/go.mod h1:CVagp6Eekz9CjGZ718Z+sloknzkDJE7Vc1Ckj9+viBk= -google.golang.org/api v0.149.0/go.mod h1:Mwn1B7JTXrzXtnvmzQE2BD6bYZQ8DShKZDZbeN9I7qI= -google.golang.org/api v0.150.0/go.mod h1:ccy+MJ6nrYFgE3WgRx/AMXOxOmU8Q4hSa+jjibzhxcg= -google.golang.org/api v0.155.0/go.mod h1:GI5qK5f40kCpHfPn6+YzGAByIKWv8ujFnmoWm7Igduk= -google.golang.org/api v0.157.0/go.mod h1:+z4v4ufbZ1WEpld6yMGHyggs+PmAHiaLNj5ytP3N01g= -google.golang.org/api v0.160.0/go.mod h1:0mu0TpK33qnydLvWqbImq2b1eQ5FHRSDCBzAxX9ZHyw= -google.golang.org/api v0.162.0/go.mod h1:6SulDkfoBIg4NFmCuZ39XeeAgSHCPecfSUuDyYlAHs0= -google.golang.org/api v0.181.0 h1:rPdjwnWgiPPOJx3IcSAQ2III5aX5tCer6wMpa/xmZi4= -google.golang.org/api v0.181.0/go.mod h1:MnQ+M0CFsfUwA5beZ+g/vCBCPXvtmZwRz2qzZk8ih1k= +google.golang.org/api v0.190.0 h1:ASM+IhLY1zljNdLu19W1jTmU6A+gMk6M46Wlur61s+Q= +google.golang.org/api v0.190.0/go.mod h1:QIr6I9iedBLnfqoD6L6Vze1UvS5Hzj5r2aUBOaZnLHo= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -2884,7 +2239,6 @@ google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEY google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -2997,26 +2351,8 @@ google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:O9kGHb51 google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98/go.mod h1:S7mY02OqCJTD0E1OiQy1F72PWFB4bZJ87cAtLPYgDR0= google.golang.org/genproto v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:0ggbjUrZYpy1q+ANUS30SEoGZ53cdfwtbuG7Ptgy108= google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= -google.golang.org/genproto v0.0.0-20230821184602-ccc8af3d0e93/go.mod 
h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= -google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= -google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= -google.golang.org/genproto v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:CCviP9RmpZ1mxVr8MUjCnSiY09IbAXZxhLE6EhHIdPU= -google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97/go.mod h1:t1VqOqqvce95G3hIDCT5FeO3YUc6Q4Oe24L/+rNMxRk= -google.golang.org/genproto v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:EMfReVxb80Dq1hhioy0sOsY9jCE46YDgHlJ7fWVUWRE= -google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:CgAqfJo+Xmu0GwA0411Ht3OU3OntXwsGmrmjI8ioGXI= -google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405/go.mod h1:3WDQMjmJk36UQhjQ89emUzb1mdaHcPeeAh4SCBKznB4= -google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY= -google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f/go.mod h1:nWSwAFPb+qfNJXsoeO3Io7zf4tMSfN8EA8RlDA04GhY= -google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3/go.mod h1:5RBcpGRxr25RbDzY5w+dmaqpSEvl8Gwl1x2CICf60ic= -google.golang.org/genproto v0.0.0-20231212172506-995d672761c0/go.mod h1:l/k7rMz0vFTBPy+tFSGvXEd3z+BcoG1k7EHbqm+YBsY= -google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917/go.mod h1:pZqR+glSb11aJ+JQcczCvgf47+duRuzNSKqE8YAQnV0= -google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:+Rvu7ElI+aLzyDQhpHMFMMltsD6m7nqpuWDd2CwJw3k= -google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= -google.golang.org/genproto v0.0.0-20240205150955-31a09d347014/go.mod h1:xEgQu1e4stdSSsxPDK8Azkrk/ECl5HvdPf6nbZrTS5M= -google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s= -google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= -google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda h1:wu/KJm9KJwpfHWhkkZGohVC6KRrc1oJNr4jwtQMOQXw= -google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda/go.mod h1:g2LLCvCeCSir/JJSWosk19BR4NVxGqHUC6rxIRsd7Aw= +google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf h1:OqdXDEakZCVtDiZTjcxfwbHPCT11ycCEsTKesBVKvyY= +google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:mCr1K1c8kX+1iSBREvU3Juo11CB+QOEWxbRS01wWl5M= google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= @@ -3025,33 +2361,11 @@ google.golang.org/genproto/googleapis/api v0.0.0-20230629202037-9506855d4529/go. 
google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:mPBs5jNgx2GuQGvFwUvVKqtn6HsUw9nP64BedgvqEsQ= google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= -google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q= google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= -google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= -google.golang.org/genproto/googleapis/api v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:RdyHbowztCGQySiCvQPgWQWgWhGnouTdCflKoDBt32U= -google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97/go.mod h1:iargEX0SFPm3xcfMI0d1domjg0ZF4Aa0p2awqyxhvF0= -google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:SUBoKXbI1Efip18FClrQVGjWcyd0QZd8KkvdP34t7ww= -google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:IBQ646DjkDkvUIsVq/cc03FUFQ9wbZu7yE396YcL870= -google.golang.org/genproto/googleapis/api v0.0.0-20231030173426-d783a09b4405/go.mod h1:oT32Z4o8Zv2xPQTg0pbVaPr0MPOH6f14RgXt7zfIpwg= -google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4= -google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f/go.mod h1:Uy9bTZJqmfrw2rIBxgGLnamc78euZULUBrLZ9XTITKI= -google.golang.org/genproto/googleapis/api v0.0.0-20231211222908-989df2bf70f3/go.mod h1:k2dtGpRrbsSyKcNPKKI5sstZkrNCZwpU/ns96JoHbGg= -google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0/go.mod h1:CAny0tYF+0/9rmDB9fahA9YLzX3+AEVl1qXbv5hhj6c= -google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917/go.mod h1:CmlNWB9lSezaYELKS5Ym1r44VrrbPUa7JTvw+6MbpJ0= -google.golang.org/genproto/googleapis/api v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:B5xPO//w8qmBDjGReYLpR6UJPnkldGkCSMoH/2vxJeg= -google.golang.org/genproto/googleapis/api v0.0.0-20240122161410-6c6643bf1457/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= -google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= -google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014/go.mod h1:rbHMSEDyoYX62nRVLOCc4Qt1HbsdytAYoVwgjiOhF3I= -google.golang.org/genproto/googleapis/api v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:PVreiBMirk8ypES6aw9d4p6iiBNSIfZEBqr3UGoAi2E= -google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= -google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae h1:AH34z6WAGVNkllnKs5raNq3yRq93VnjBG6rpfub/jYk= -google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae/go.mod h1:FfiGhwUm6CJviekPrc0oJ+7h29e+DmWU6UtjX0ZvI7Y= +google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= +google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f h1:b1Ln/PG8orm0SsBbHZWke8dDp2lrCD4jSmfglFpTZbk= +google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f/go.mod 
h1:AHT0dDg3SoMOgZGnZk29b5xTbPHMoEC8qthmBLJCpys= google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= -google.golang.org/genproto/googleapis/bytestream v0.0.0-20230807174057-1744710a1577/go.mod h1:NjCQG/D8JandXxM57PZbAJL1DCNL6EypA0vPPwfsc7c= -google.golang.org/genproto/googleapis/bytestream v0.0.0-20231030173426-d783a09b4405/go.mod h1:GRUCuLdzVqZte8+Dl/D4N25yLzcGqqWaYkeVOwulFqw= -google.golang.org/genproto/googleapis/bytestream v0.0.0-20231212172506-995d672761c0/go.mod h1:guYXGPwC6jwxgWKW5Y405fKWOFNwlvUlUnzyp9i0uqo= -google.golang.org/genproto/googleapis/bytestream v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:ZSvZ8l+AWJwXw91DoTjWjaVLpWU6o0eZ4YLYpH8aLeQ= -google.golang.org/genproto/googleapis/bytestream v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:SCz6T5xjNXM4QFPRwxHcfChp7V+9DcXR3ay2TkHR8Tg= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= @@ -3061,32 +2375,16 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20230706204954-ccb25ca9f130/go. google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= google.golang.org/genproto/googleapis/rpc v0.0.0-20230731190214-cbb8c96f2d6d/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= google.golang.org/genproto/googleapis/rpc v0.0.0-20230803162519-f966b187b2e5/go.mod h1:zBEcrKX2ZOcEkHWxBPAIvYUWOKKMIhYcmNiUIu2ji3I= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230920183334-c177e329c48b/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:4cYg8o5yUbm77w8ZX00LhMVNl/YVBFJRYWDc0uYWMs0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:swOH3j0KzcDDgGUWr+SNpyTen5YrXjS3eyPzFYKc6lc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405/go.mod h1:67X1fPuzjcrkymZzZV1vvkFeTn2Rvc6lYF9MYFGCcwE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:oQ5rr10WTTMvP4A36n8JpR1OrO1BEiV4f78CneXZxkA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231211222908-989df2bf70f3/go.mod h1:eJVxU6o+4G1PSczBr85xmyvSNYAKvAYgkub40YGomFM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0/go.mod h1:FUoWkonphQm3RhTS+kOEhF8h0iDpm4tdXolVCeZ9KKA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917/go.mod h1:xtjpI3tXFPP051KaWnhvxkiubL/6dJ18vLVf7q2pTOU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:daQN87bsDqDoe316QbbvX60nMoJQa4r6Ds0ZuoAe5yA= 
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240122161410-6c6643bf1457/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014/go.mod h1:SaPjaZGWb0lPqs6Ittu0spdfrOArqji4ZdeP5IC/9N4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:YUWgXUFRPfoYK1IHMuxH5K6nPEXSCzIMljnQ59lLRCk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8 h1:mxSlqyb8ZAHsYDCfiXN1EDdNTdvjUJSLY+OnAUtYNYA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8/go.mod h1:I7Y+G38R2bu5j1aLzfFmQfTcU/WnFuqDwLZAbvKTKpM= -google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= -google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf h1:liao9UHurZLtiEwBgT9LMOnKYsHze6eA6w1KQCMVN2Q= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 h1:rNBFJjBCOgVr9pWD7rs/knKL4FRTKgpZmsRfV214zcA= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y= -google.golang.org/grpc/examples v0.0.0-20210424002626-9572fd6faeae/go.mod h1:Ly7ZA/ARzg8fnPU9TyZIxoz33sEUuWX7txiqs8lPTgE= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 h1:F29+wU6Ee6qgu9TddPgooOdaqsxTMunOoj8KA5yuS5A= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1/go.mod h1:5KF+wpkbTSbGcR9zteSqZV6fqFOWBl4Yde8En8MryZA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -3107,8 +2405,9 @@ google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= 
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -3136,7 +2435,6 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -3145,50 +2443,47 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= -k8s.io/api v0.29.4 h1:WEnF/XdxuCxdG3ayHNRR8yH3cI1B/llkWBma6bq4R3w= -k8s.io/api v0.29.4/go.mod h1:DetSv0t4FBTcEpfA84NJV3g9a7+rSzlUHk5ADAYHUv0= -k8s.io/apiextensions-apiserver v0.29.4 h1:M7hbuHU/ckbibR7yPbe6DyNWgTFKNmZDbdZKD8q1Smk= -k8s.io/apiextensions-apiserver v0.29.4/go.mod h1:TTDC9fB+0kHY2rogf5hgBR03KBKCwED+GHUsXGpR7SM= -k8s.io/apimachinery v0.29.4 h1:RaFdJiDmuKs/8cm1M6Dh1Kvyh59YQFDcFuFTSmXes6Q= -k8s.io/apimachinery v0.29.4/go.mod h1:i3FJVwhvSp/6n8Fl4K97PJEP8C+MM+aoDq4+ZJBf70Y= -k8s.io/apiserver v0.29.4 h1:wPwGOO58GQOpRiZu59P5eRoDcB7QtV+QBglkRiXwCiM= -k8s.io/apiserver v0.29.4/go.mod h1:VqTF9t98HVfhKZVRohCPezsdUt9u2g3bHKftxGcXoRo= -k8s.io/client-go v0.29.4 h1:79ytIedxVfyXV8rpH3jCBW0u+un0fxHDwX5F9K8dPR8= -k8s.io/client-go v0.29.4/go.mod h1:kC1thZQ4zQWYwldsfI088BbK6RkxK+aF5ebV8y9Q4tk= -k8s.io/code-generator v0.29.4 h1:8ESudFNbY5/9BzB8KOEFG2uV9Q0AQxkc4mrQESr30Ks= -k8s.io/code-generator v0.29.4/go.mod h1:7TYnI0dYItL2cKuhhgPSuF3WED9uMdELgbVXFfn/joE= -k8s.io/component-base v0.29.4 h1:xeKzuuHI/1tjleu5jycDAcYbhAxeGHCQBZUY2eRIkOo= -k8s.io/component-base v0.29.4/go.mod h1:pYjt+oEZP9gtmwSikwAJgfSBikqKX2gOqRat0QjmQt0= +k8s.io/api v0.29.7 h1:Q2/thp7YYESgy0MGzxT9RvA/6doLJHBXSFH8GGLxSbc= +k8s.io/api v0.29.7/go.mod h1:mPimdbyuIjwoLtBEVIGVUYb4BKOE+44XHt/n4IqKsLA= +k8s.io/apiextensions-apiserver v0.30.0 h1:jcZFKMqnICJfRxTgnC4E+Hpcq8UEhT8B2lhBcQ+6uAs= +k8s.io/apiextensions-apiserver v0.30.0/go.mod h1:N9ogQFGcrbWqAY9p2mUAL5mGxsLqwgtUce127VtRX5Y= +k8s.io/apimachinery v0.29.7 h1:ICXzya58Q7hyEEfnTrbmdfX1n1schSepX2KUfC2/ykc= +k8s.io/apimachinery v0.29.7/go.mod h1:i3FJVwhvSp/6n8Fl4K97PJEP8C+MM+aoDq4+ZJBf70Y= +k8s.io/apiserver v0.29.7 h1:yvdZAZJ6Bmih8Iu8CqUB74otyPP0OM3XKzYzAFiWsY4= +k8s.io/apiserver v0.29.7/go.mod h1:MjEld3ySZrQ4eOzf6OKANT/v8GvM3sb1e5vozXoIirs= +k8s.io/client-go v0.29.7 h1:vTtiFrGBKlcBhxaeZC4eDrqui1e108nsTyue/KU63IY= +k8s.io/client-go v0.29.7/go.mod h1:69BvVqdRozgR/9TP45u/oO0tfrdbP+I8RqrcCJQshzg= +k8s.io/code-generator v0.29.7 h1:NEwmKOJVNObCh3upBLEojL1QuJMzGplOTYZnee4h0TY= +k8s.io/code-generator v0.29.7/go.mod 
h1:7TYnI0dYItL2cKuhhgPSuF3WED9uMdELgbVXFfn/joE= +k8s.io/component-base v0.29.7 h1:zXLJvZjvvDWdYmZCwZYk95E1Fd2oRXUz71mQukkRk5I= +k8s.io/component-base v0.29.7/go.mod h1:ddLTpIrjazaRI1EG83M41GNcYEAdskuQmx4JOOSXCOg= k8s.io/gengo v0.0.0-20240129211411-f967bbeff4b4 h1:izq7u3SJBdOAuA5YYe1/PIp9jczrih/jGlKRRt0G7bQ= k8s.io/gengo v0.0.0-20240129211411-f967bbeff4b4/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kms v0.29.4 h1:cFGEoCLwoXk/eqYZppLZxybCdmEWeRKMCbm9f13IdRQ= -k8s.io/kms v0.29.4/go.mod h1:vWVImKkJd+1BQY4tBwdfSwjQBiLrnbNtHADcDEDQFtk= +k8s.io/kms v0.30.0 h1:ZlnD/ei5lpvUlPw6eLfVvH7d8i9qZ6HwUQgydNVks8g= +k8s.io/kms v0.30.0/go.mod h1:GrMurD0qk3G4yNgGcsCEmepqf9KyyIrTXYR2lyUOJC4= k8s.io/kube-aggregator v0.29.4 h1:yT7vYtwIag4G8HNrktYZ3qz6p6oHKronMAXOw4eQ2WQ= k8s.io/kube-aggregator v0.29.4/go.mod h1:zBfe4iXXmw5HinNgN0JoAu5rpXdyCUvRfG99+FVOd68= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= -k8s.io/metrics v0.29.4 h1:06sZ63/Kt9HEb5GP/1y6xbHDz6XkxnHpu949UdXfoXQ= -k8s.io/metrics v0.29.4/go.mod h1:ZN9peB0nLTqPZuwQna8ZUrPFJQ0i8QNH4pqRJopS+9c= -k8s.io/utils v0.0.0-20240423183400-0849a56e8f22 h1:ao5hUqGhsqdm+bYbjH/pRkCs0unBGe9UyDahzs9zQzQ= -k8s.io/utils v0.0.0-20240423183400-0849a56e8f22/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -knative.dev/pkg v0.0.0-20240423132823-3c6badc82748 h1:0X8ZtnOZqGPjauVLLvOyMaBOMX5BBkvAD1/IuxA61Ys= -knative.dev/pkg v0.0.0-20240423132823-3c6badc82748/go.mod h1:Y/ufiCvMogYcpDwZJPcTRBYeBo57RaEQhY0Lq/9RKmU= +k8s.io/metrics v0.29.7 h1:/oMPdVL7dt+lF8W6lXTg9gIKz1dDKgVBfDnJwgyJrhk= +k8s.io/metrics v0.29.7/go.mod h1:5AiYPn1Crd25wtTh7OxHg9Rm2t9THSXJVp3Lb2k7MB8= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +knative.dev/pkg v0.0.0-20240805063731-c88d5dad9653 h1:VHUW124ZpkDn4EnIzMuGWvGuJte3ISIoHMmEw2kx0zU= +knative.dev/pkg v0.0.0-20240805063731-c88d5dad9653/go.mod h1:H+5rS2GEWpAZzrmQoXOEVq/1M77LLMhR7+4jZBMOQ24= lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= -lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= modernc.org/cc/v3 v3.37.0/go.mod h1:vtL+3mdHx/wcj3iEGz84rQa8vEqR6XM84v5Lcvfph20= -modernc.org/cc/v3 v3.38.1/go.mod h1:vtL+3mdHx/wcj3iEGz84rQa8vEqR6XM84v5Lcvfph20= modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0= modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= modernc.org/ccgo/v3 v3.0.0-20220904174949-82d86e1b6d56/go.mod h1:YSXjPL62P2AMSxBphRHPn7IkzhVHqkvOnRKAKh+W6ZI= -modernc.org/ccgo/v3 
v3.0.0-20220910160915-348f15de615a/go.mod h1:8p47QxPkdugex9J4n9P2tLZ9bK01yngIVp00g4nomW0= modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= @@ -3206,12 +2501,9 @@ modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= modernc.org/libc v1.17.4/go.mod h1:WNg2ZH56rDEwdropAJeZPQkXmDwh+JCA1s/htl6r2fA= modernc.org/libc v1.18.0/go.mod h1:vj6zehR5bfc98ipowQOM2nIDUZnVew/wNC/2tOGS+q0= -modernc.org/libc v1.19.0/go.mod h1:ZRfIaEkgrYgZDl6pa4W39HgN5G/yDW+NRmNKZBDFrk0= modernc.org/libc v1.20.3/go.mod h1:ZRfIaEkgrYgZDl6pa4W39HgN5G/yDW+NRmNKZBDFrk0= -modernc.org/libc v1.21.2/go.mod h1:przBsL5RDOZajTVslkugzLBj1evTue36jEomFQOoYuI= modernc.org/libc v1.21.4/go.mod h1:przBsL5RDOZajTVslkugzLBj1evTue36jEomFQOoYuI= modernc.org/libc v1.22.2/go.mod h1:uvQavJ1pZ0hIoC/jfqNoMLURIMhKzINIWypNM17puug= -modernc.org/libc v1.22.4/go.mod h1:jj+Z7dTNX8fBScMVNRAYZ/jF91K8fdT2hYMThc3YjBY= modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= @@ -3225,17 +2517,14 @@ modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= modernc.org/sqlite v1.18.2/go.mod h1:kvrTLEWgxUcHa2GfHBQtanR1H9ht3hTJNtKpzH9k1u0= -modernc.org/sqlite v1.21.2/go.mod h1:cxbLkB5WS32DnQqeH4h4o1B0eMr8W/y8/RGuxQ3JsC0= modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= modernc.org/tcl v1.13.2/go.mod h1:7CLiGIPo1M8Rv1Mitpv5akc2+8fxUd2y2UzC/MfMzy0= -modernc.org/tcl v1.15.1/go.mod h1:aEjeGJX2gz1oWKOLDVZ2tnEWLUrIn8H+GFu+akoDhqs= modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= -modernc.org/z v1.7.0/go.mod h1:hVdgNMh8ggTuRG1rGU8x+xGRFfiQUIAw0ZqlPy8+HyQ= nhooyr.io/websocket v1.8.11 h1:f/qXNc2/3DpoSZkHt1DQu6rj4zGC8JmkkLkWss0MgN0= nhooyr.io/websocket v1.8.11/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= @@ -3244,24 +2533,24 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 h1:/U5vjBbQn3RChhv7P11uhYvCSm5G2GaIi5AIGBS6r4c= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0/go.mod h1:z7+wmGM2dfIiLRfrC6jb5kV2Mq/sK1ZP303cxzkV5Y4= -sigs.k8s.io/controller-runtime v0.17.3 h1:65QmN7r3FWgTxDMz9fvGnO1kbf2nu+acg9p2R9oYYYk= -sigs.k8s.io/controller-runtime v0.17.3/go.mod h1:N0jpP5Lo7lMTF9aL56Z/B2oWBJjey6StQM0jRbKQXtY= -sigs.k8s.io/controller-runtime/tools/setup-envtest 
v0.0.0-20240201105228-4000e996a202 h1:iTQSA9XwAEiPjoO/z3tZ4vAlbn6UXShvbRKWnN6P52w= -sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240201105228-4000e996a202/go.mod h1:TF/lVLWS+JNNaVqJuDDictY2hZSXSsIHCx4FClMvqFg= -sigs.k8s.io/controller-tools v0.14.0 h1:rnNoCC5wSXlrNoBKKzL70LNJKIQKEzT6lloG6/LF73A= -sigs.k8s.io/controller-tools v0.14.0/go.mod h1:TV7uOtNNnnR72SpzhStvPkoS/U5ir0nMudrkrC4M9Sc= -sigs.k8s.io/custom-metrics-apiserver v1.28.1-0.20240425173932-1a855fe8c789 h1:TDueMxkzsIHjC9L7ArdGY1zpD5YmJ0xAiDEnhe172jU= -sigs.k8s.io/custom-metrics-apiserver v1.28.1-0.20240425173932-1a855fe8c789/go.mod h1:4XXz92s/SEmP3L2nlUu6lMWorxEQXAD39AdL22IQkDA= +sigs.k8s.io/controller-runtime v0.17.5 h1:1FI9Lm7NiOOmBsgTV36/s2XrEFXnO2C4sbg/Zme72Rw= +sigs.k8s.io/controller-runtime v0.17.5/go.mod h1:N0jpP5Lo7lMTF9aL56Z/B2oWBJjey6StQM0jRbKQXtY= +sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240804232438-89b5deec030c h1:5RmVH09e+y5kIkmpR0fQPP5xYHMPyUo59r3KbepC5+U= +sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240804232438-89b5deec030c/go.mod h1:lMOTIN14oWZXDAQ4YuBsRCpS/N5EyqrBJPwkcyZqmAk= +sigs.k8s.io/controller-tools v0.15.0 h1:4dxdABXGDhIa68Fiwaif0vcu32xfwmgQ+w8p+5CxoAI= +sigs.k8s.io/controller-tools v0.15.0/go.mod h1:8zUSS2T8Hx0APCNRhJWbS3CAQEbIxLa07khzh7pZmXM= +sigs.k8s.io/custom-metrics-apiserver v1.29.0 h1:uUoUjbPrE6nVBE82bo8siIkUDMsfbaSTBB6jAx/LJ9M= +sigs.k8s.io/custom-metrics-apiserver v1.29.0/go.mod h1:4XXz92s/SEmP3L2nlUu6lMWorxEQXAD39AdL22IQkDA= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/kustomize/api v0.17.1 h1:MYJBOP/yQ3/5tp4/sf6HiiMfNNyO97LmtnirH9SLNr4= -sigs.k8s.io/kustomize/api v0.17.1/go.mod h1:ffn5491s2EiNrJSmgqcWGzQUVhc/pB0OKNI0HsT/0tA= -sigs.k8s.io/kustomize/cmd/config v0.14.0 h1:TMKWBtk0Ox1EUwafT9hncg7EFrvrBlqDDHXmBHm51U8= -sigs.k8s.io/kustomize/cmd/config v0.14.0/go.mod h1:J+ukok4u7k8esCzEhRTygTW7dzDKYbC0kCDkpBK3VfI= -sigs.k8s.io/kustomize/kustomize/v5 v5.4.1 h1:97qXYnSngAvY+gUAzMfjJ73ClKZAD+JrKE+7OIMWYhw= -sigs.k8s.io/kustomize/kustomize/v5 v5.4.1/go.mod h1:nKouayWQbaro85jrNbcA+tZ6x63wYYxGPgniA/fEmgQ= -sigs.k8s.io/kustomize/kyaml v0.17.0 h1:G2bWs03V9Ur2PinHLzTUJ8Ded+30SzXZKiO92SRDs3c= -sigs.k8s.io/kustomize/kyaml v0.17.0/go.mod h1:6lxkYF1Cv9Ic8g/N7I86cvxNc5iinUo/P2vKsHNmpyE= +sigs.k8s.io/kustomize/api v0.17.3 h1:6GCuHSsxq7fN5yhF2XrC+AAr8gxQwhexgHflOAD/JJU= +sigs.k8s.io/kustomize/api v0.17.3/go.mod h1:TuDH4mdx7jTfK61SQ/j1QZM/QWR+5rmEiNjvYlhzFhc= +sigs.k8s.io/kustomize/cmd/config v0.14.2 h1:YOCu0BnVPm2Iq6PR4fJgO6+rivg5LbR3+o/4ZUeXAvM= +sigs.k8s.io/kustomize/cmd/config v0.14.2/go.mod h1:w30rR4oCUm5wEi0tSuBCmuBMS9Z/Cq6oDdfg8fL/qls= +sigs.k8s.io/kustomize/kustomize/v5 v5.4.3 h1:SJMDq/0HYNTFPgmlBuxsGzdBB6furxhwHKHEfzBbvSw= +sigs.k8s.io/kustomize/kustomize/v5 v5.4.3/go.mod h1:vhSLBp3H7wx0oeh/hE7CMQVNft42plZxUqxhvVZxb8Q= +sigs.k8s.io/kustomize/kyaml v0.17.2 h1:+AzvoJUY0kq4QAhH/ydPHHMRLijtUKiyVyh7fOSshr0= +sigs.k8s.io/kustomize/kyaml v0.17.2/go.mod h1:9V0mCjIEYjlXuCdYsSXvyoy2BTsLESH7TlGV81S282U= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/pkg/metricsservice/api/metrics.pb.go b/pkg/metricsservice/api/metrics.pb.go index a4f0372aa1a..616f7c3604c 100644 --- 
a/pkg/metricsservice/api/metrics.pb.go +++ b/pkg/metricsservice/api/metrics.pb.go @@ -15,8 +15,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.1 -// protoc v5.27.0 +// protoc-gen-go v1.34.2 +// protoc v5.27.3 // source: metrics.proto package api @@ -138,7 +138,7 @@ func file_metrics_proto_rawDescGZIP() []byte { } var file_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_metrics_proto_goTypes = []interface{}{ +var file_metrics_proto_goTypes = []any{ (*ScaledObjectRef)(nil), // 0: api.ScaledObjectRef (*v1beta1.ExternalMetricValueList)(nil), // 1: k8s.io.metrics.pkg.apis.external_metrics.v1beta1.ExternalMetricValueList } @@ -158,7 +158,7 @@ func file_metrics_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_metrics_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*ScaledObjectRef); i { case 0: return &v.state diff --git a/pkg/metricsservice/api/metrics_grpc.pb.go b/pkg/metricsservice/api/metrics_grpc.pb.go index ad6b36d807e..0cc421b3e40 100644 --- a/pkg/metricsservice/api/metrics_grpc.pb.go +++ b/pkg/metricsservice/api/metrics_grpc.pb.go @@ -15,8 +15,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v5.27.0 +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.3 // source: metrics.proto package api @@ -31,8 +31,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 const ( MetricsService_GetMetrics_FullMethodName = "/api.MetricsService/GetMetrics" @@ -54,8 +54,9 @@ func NewMetricsServiceClient(cc grpc.ClientConnInterface) MetricsServiceClient { } func (c *metricsServiceClient) GetMetrics(ctx context.Context, in *ScaledObjectRef, opts ...grpc.CallOption) (*v1beta1.ExternalMetricValueList, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(v1beta1.ExternalMetricValueList) - err := c.cc.Invoke(ctx, MetricsService_GetMetrics_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, MetricsService_GetMetrics_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -64,20 +65,24 @@ func (c *metricsServiceClient) GetMetrics(ctx context.Context, in *ScaledObjectR // MetricsServiceServer is the server API for MetricsService service. // All implementations must embed UnimplementedMetricsServiceServer -// for forward compatibility +// for forward compatibility. type MetricsServiceServer interface { GetMetrics(context.Context, *ScaledObjectRef) (*v1beta1.ExternalMetricValueList, error) mustEmbedUnimplementedMetricsServiceServer() } -// UnimplementedMetricsServiceServer must be embedded to have forward compatible implementations. -type UnimplementedMetricsServiceServer struct { -} +// UnimplementedMetricsServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. 
+type UnimplementedMetricsServiceServer struct{} func (UnimplementedMetricsServiceServer) GetMetrics(context.Context, *ScaledObjectRef) (*v1beta1.ExternalMetricValueList, error) { return nil, status.Errorf(codes.Unimplemented, "method GetMetrics not implemented") } func (UnimplementedMetricsServiceServer) mustEmbedUnimplementedMetricsServiceServer() {} +func (UnimplementedMetricsServiceServer) testEmbeddedByValue() {} // UnsafeMetricsServiceServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to MetricsServiceServer will @@ -87,6 +92,13 @@ type UnsafeMetricsServiceServer interface { } func RegisterMetricsServiceServer(s grpc.ServiceRegistrar, srv MetricsServiceServer) { + // If the following call pancis, it indicates UnimplementedMetricsServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&MetricsService_ServiceDesc, srv) } diff --git a/pkg/mock/mock_eventemitter/mock_interface.go b/pkg/mock/mock_eventemitter/mock_interface.go index d3346ea50bf..81494706708 100644 --- a/pkg/mock/mock_eventemitter/mock_interface.go +++ b/pkg/mock/mock_eventemitter/mock_interface.go @@ -18,7 +18,6 @@ import ( gomock "go.uber.org/mock/gomock" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" - types "k8s.io/apimachinery/pkg/types" ) // MockEventHandler is a mock of EventHandler interface. @@ -59,7 +58,7 @@ func (mr *MockEventHandlerMockRecorder) DeleteCloudEventSource(cloudEventSource } // Emit mocks base method. 
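The regenerated gRPC stubs above require service implementations to embed UnimplementedMetricsServiceServer by value; RegisterMetricsServiceServer now probes for that at registration time. A minimal sketch of a conforming implementation, assuming KEDA's pkg/metricsservice/api import path and a hypothetical server type (neither is taken from this patch):

```go
package example

import (
	"context"

	"github.com/kedacore/keda/v2/pkg/metricsservice/api"
	"k8s.io/metrics/pkg/apis/external_metrics/v1beta1"
)

// metricsServiceServer is a hypothetical implementation used only for illustration.
type metricsServiceServer struct {
	// Embedded by value (not as a pointer), so the testEmbeddedByValue probe
	// in RegisterMetricsServiceServer succeeds and nil dereferences are avoided.
	api.UnimplementedMetricsServiceServer
}

func (s *metricsServiceServer) GetMetrics(ctx context.Context, in *api.ScaledObjectRef) (*v1beta1.ExternalMetricValueList, error) {
	// A real server would translate scaler metrics here; an empty list keeps the sketch compilable.
	return &v1beta1.ExternalMetricValueList{}, nil
}
```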
-func (m *MockEventHandler) Emit(object runtime.Object, namesapce types.NamespacedName, eventType string, cloudeventType v1alpha1.CloudEventType, reason, message string) { +func (m *MockEventHandler) Emit(object runtime.Object, namesapce, eventType string, cloudeventType v1alpha1.CloudEventType, reason, message string) { m.ctrl.T.Helper() m.ctrl.Call(m, "Emit", object, namesapce, eventType, cloudeventType, reason, message) } diff --git a/pkg/scalers/authentication/authentication_helpers.go b/pkg/scalers/authentication/authentication_helpers.go index 0ffaab0a493..f3c4fa9c900 100644 --- a/pkg/scalers/authentication/authentication_helpers.go +++ b/pkg/scalers/authentication/authentication_helpers.go @@ -213,7 +213,7 @@ func CreateHTTPRoundTripper(roundTripperType TransportType, auth *AuthMeta, conf rt = pConfig.NewBasicAuthRoundTripper( auth.Username, pConfig.Secret(auth.Password), - "", roundTripper, + "", "", roundTripper, ) } diff --git a/pkg/scalers/external_scaler.go b/pkg/scalers/external_scaler.go index 089dc0cfbfd..f6e0ef63acc 100644 --- a/pkg/scalers/external_scaler.go +++ b/pkg/scalers/external_scaler.go @@ -314,7 +314,7 @@ func getClientForConnectionPool(metadata externalScalerMetadata, logger logr.Log if err != nil { return nil, err } - return grpc.Dial(metadata.scalerAddress, + return grpc.NewClient(metadata.scalerAddress, grpc.WithDefaultServiceConfig(grpcConfig), grpc.WithTransportCredentials(creds)) } @@ -326,12 +326,12 @@ func getClientForConnectionPool(metadata externalScalerMetadata, logger logr.Log if len(tlsConfig.Certificates) > 0 || metadata.caCert != "" { // nosemgrep: go.grpc.ssrf.grpc-tainted-url-host.grpc-tainted-url-host - return grpc.Dial(metadata.scalerAddress, + return grpc.NewClient(metadata.scalerAddress, grpc.WithDefaultServiceConfig(grpcConfig), grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))) } - return grpc.Dial(metadata.scalerAddress, + return grpc.NewClient(metadata.scalerAddress, grpc.WithDefaultServiceConfig(grpcConfig), grpc.WithTransportCredentials(insecure.NewCredentials())) } diff --git a/pkg/scalers/external_scaler_test.go b/pkg/scalers/external_scaler_test.go index f5692a6b172..efc280054c9 100644 --- a/pkg/scalers/external_scaler_test.go +++ b/pkg/scalers/external_scaler_test.go @@ -247,7 +247,7 @@ func TestWaitForState(t *testing.T) { }() // build client connect to server - grpcClient, err := grpc.Dial(address, + grpcClient, err := grpc.NewClient(address, grpc.WithDefaultServiceConfig(grpcConfig), grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { diff --git a/pkg/scalers/externalscaler/externalscaler.pb.go b/pkg/scalers/externalscaler/externalscaler.pb.go index b7dad6dd09c..66469de84ef 100644 --- a/pkg/scalers/externalscaler/externalscaler.pb.go +++ b/pkg/scalers/externalscaler/externalscaler.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
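The external scaler above (and the Liiklus and PredictKube scalers later in this patch) switches from the deprecated grpc.Dial to grpc.NewClient. A hedged sketch of the pattern with a placeholder address: NewClient validates the target and options up front but does not open a connection, so unreachable servers surface on the first RPC or an explicit conn.Connect() rather than at construction time.

```go
package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"github.com/kedacore/keda/v2/pkg/scalers/externalscaler"
)

// newScalerClient mirrors the migration above: grpc.NewClient only builds the
// client; the underlying connection is established lazily on the first RPC.
func newScalerClient(address string) (externalscaler.ExternalScalerClient, *grpc.ClientConn, error) {
	conn, err := grpc.NewClient(address,
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return nil, nil, err // e.g. a malformed target or an invalid option
	}
	return externalscaler.NewExternalScalerClient(conn), conn, nil
}
```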
// versions: -// protoc-gen-go v1.34.1 -// protoc v5.27.0 +// protoc-gen-go v1.34.2 +// protoc v5.27.3 // source: externalscaler.proto package externalscaler @@ -480,7 +480,7 @@ func file_externalscaler_proto_rawDescGZIP() []byte { } var file_externalscaler_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_externalscaler_proto_goTypes = []interface{}{ +var file_externalscaler_proto_goTypes = []any{ (*ScaledObjectRef)(nil), // 0: externalscaler.ScaledObjectRef (*IsActiveResponse)(nil), // 1: externalscaler.IsActiveResponse (*GetMetricSpecResponse)(nil), // 2: externalscaler.GetMetricSpecResponse @@ -516,7 +516,7 @@ func file_externalscaler_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_externalscaler_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_externalscaler_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*ScaledObjectRef); i { case 0: return &v.state @@ -528,7 +528,7 @@ func file_externalscaler_proto_init() { return nil } } - file_externalscaler_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_externalscaler_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*IsActiveResponse); i { case 0: return &v.state @@ -540,7 +540,7 @@ func file_externalscaler_proto_init() { return nil } } - file_externalscaler_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_externalscaler_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*GetMetricSpecResponse); i { case 0: return &v.state @@ -552,7 +552,7 @@ func file_externalscaler_proto_init() { return nil } } - file_externalscaler_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_externalscaler_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*MetricSpec); i { case 0: return &v.state @@ -564,7 +564,7 @@ func file_externalscaler_proto_init() { return nil } } - file_externalscaler_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_externalscaler_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*GetMetricsRequest); i { case 0: return &v.state @@ -576,7 +576,7 @@ func file_externalscaler_proto_init() { return nil } } - file_externalscaler_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_externalscaler_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*GetMetricsResponse); i { case 0: return &v.state @@ -588,7 +588,7 @@ func file_externalscaler_proto_init() { return nil } } - file_externalscaler_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_externalscaler_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*MetricValue); i { case 0: return &v.state diff --git a/pkg/scalers/externalscaler/externalscaler_grpc.pb.go b/pkg/scalers/externalscaler/externalscaler_grpc.pb.go index a61a3010aaf..c44e60849bd 100644 --- a/pkg/scalers/externalscaler/externalscaler_grpc.pb.go +++ b/pkg/scalers/externalscaler/externalscaler_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v5.27.0 +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.3 // source: externalscaler.proto package externalscaler @@ -15,8 +15,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. 
-const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 const ( ExternalScaler_IsActive_FullMethodName = "/externalscaler.ExternalScaler/IsActive" @@ -30,7 +30,7 @@ const ( // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type ExternalScalerClient interface { IsActive(ctx context.Context, in *ScaledObjectRef, opts ...grpc.CallOption) (*IsActiveResponse, error) - StreamIsActive(ctx context.Context, in *ScaledObjectRef, opts ...grpc.CallOption) (ExternalScaler_StreamIsActiveClient, error) + StreamIsActive(ctx context.Context, in *ScaledObjectRef, opts ...grpc.CallOption) (grpc.ServerStreamingClient[IsActiveResponse], error) GetMetricSpec(ctx context.Context, in *ScaledObjectRef, opts ...grpc.CallOption) (*GetMetricSpecResponse, error) GetMetrics(ctx context.Context, in *GetMetricsRequest, opts ...grpc.CallOption) (*GetMetricsResponse, error) } @@ -44,20 +44,22 @@ func NewExternalScalerClient(cc grpc.ClientConnInterface) ExternalScalerClient { } func (c *externalScalerClient) IsActive(ctx context.Context, in *ScaledObjectRef, opts ...grpc.CallOption) (*IsActiveResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(IsActiveResponse) - err := c.cc.Invoke(ctx, ExternalScaler_IsActive_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, ExternalScaler_IsActive_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *externalScalerClient) StreamIsActive(ctx context.Context, in *ScaledObjectRef, opts ...grpc.CallOption) (ExternalScaler_StreamIsActiveClient, error) { - stream, err := c.cc.NewStream(ctx, &ExternalScaler_ServiceDesc.Streams[0], ExternalScaler_StreamIsActive_FullMethodName, opts...) +func (c *externalScalerClient) StreamIsActive(ctx context.Context, in *ScaledObjectRef, opts ...grpc.CallOption) (grpc.ServerStreamingClient[IsActiveResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &ExternalScaler_ServiceDesc.Streams[0], ExternalScaler_StreamIsActive_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &externalScalerStreamIsActiveClient{stream} + x := &grpc.GenericClientStream[ScaledObjectRef, IsActiveResponse]{ClientStream: stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -67,26 +69,13 @@ func (c *externalScalerClient) StreamIsActive(ctx context.Context, in *ScaledObj return x, nil } -type ExternalScaler_StreamIsActiveClient interface { - Recv() (*IsActiveResponse, error) - grpc.ClientStream -} - -type externalScalerStreamIsActiveClient struct { - grpc.ClientStream -} - -func (x *externalScalerStreamIsActiveClient) Recv() (*IsActiveResponse, error) { - m := new(IsActiveResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type ExternalScaler_StreamIsActiveClient = grpc.ServerStreamingClient[IsActiveResponse] func (c *externalScalerClient) GetMetricSpec(ctx context.Context, in *ScaledObjectRef, opts ...grpc.CallOption) (*GetMetricSpecResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
out := new(GetMetricSpecResponse) - err := c.cc.Invoke(ctx, ExternalScaler_GetMetricSpec_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, ExternalScaler_GetMetricSpec_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -94,8 +83,9 @@ func (c *externalScalerClient) GetMetricSpec(ctx context.Context, in *ScaledObje } func (c *externalScalerClient) GetMetrics(ctx context.Context, in *GetMetricsRequest, opts ...grpc.CallOption) (*GetMetricsResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetMetricsResponse) - err := c.cc.Invoke(ctx, ExternalScaler_GetMetrics_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, ExternalScaler_GetMetrics_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -104,23 +94,26 @@ func (c *externalScalerClient) GetMetrics(ctx context.Context, in *GetMetricsReq // ExternalScalerServer is the server API for ExternalScaler service. // All implementations must embed UnimplementedExternalScalerServer -// for forward compatibility +// for forward compatibility. type ExternalScalerServer interface { IsActive(context.Context, *ScaledObjectRef) (*IsActiveResponse, error) - StreamIsActive(*ScaledObjectRef, ExternalScaler_StreamIsActiveServer) error + StreamIsActive(*ScaledObjectRef, grpc.ServerStreamingServer[IsActiveResponse]) error GetMetricSpec(context.Context, *ScaledObjectRef) (*GetMetricSpecResponse, error) GetMetrics(context.Context, *GetMetricsRequest) (*GetMetricsResponse, error) mustEmbedUnimplementedExternalScalerServer() } -// UnimplementedExternalScalerServer must be embedded to have forward compatible implementations. -type UnimplementedExternalScalerServer struct { -} +// UnimplementedExternalScalerServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedExternalScalerServer struct{} func (UnimplementedExternalScalerServer) IsActive(context.Context, *ScaledObjectRef) (*IsActiveResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method IsActive not implemented") } -func (UnimplementedExternalScalerServer) StreamIsActive(*ScaledObjectRef, ExternalScaler_StreamIsActiveServer) error { +func (UnimplementedExternalScalerServer) StreamIsActive(*ScaledObjectRef, grpc.ServerStreamingServer[IsActiveResponse]) error { return status.Errorf(codes.Unimplemented, "method StreamIsActive not implemented") } func (UnimplementedExternalScalerServer) GetMetricSpec(context.Context, *ScaledObjectRef) (*GetMetricSpecResponse, error) { @@ -130,6 +123,7 @@ func (UnimplementedExternalScalerServer) GetMetrics(context.Context, *GetMetrics return nil, status.Errorf(codes.Unimplemented, "method GetMetrics not implemented") } func (UnimplementedExternalScalerServer) mustEmbedUnimplementedExternalScalerServer() {} +func (UnimplementedExternalScalerServer) testEmbeddedByValue() {} // UnsafeExternalScalerServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to ExternalScalerServer will @@ -139,6 +133,13 @@ type UnsafeExternalScalerServer interface { } func RegisterExternalScalerServer(s grpc.ServiceRegistrar, srv ExternalScalerServer) { + // If the following call pancis, it indicates UnimplementedExternalScalerServer was + // embedded by pointer and is nil. 
This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&ExternalScaler_ServiceDesc, srv) } @@ -165,21 +166,11 @@ func _ExternalScaler_StreamIsActive_Handler(srv interface{}, stream grpc.ServerS if err := stream.RecvMsg(m); err != nil { return err } - return srv.(ExternalScalerServer).StreamIsActive(m, &externalScalerStreamIsActiveServer{stream}) -} - -type ExternalScaler_StreamIsActiveServer interface { - Send(*IsActiveResponse) error - grpc.ServerStream + return srv.(ExternalScalerServer).StreamIsActive(m, &grpc.GenericServerStream[ScaledObjectRef, IsActiveResponse]{ServerStream: stream}) } -type externalScalerStreamIsActiveServer struct { - grpc.ServerStream -} - -func (x *externalScalerStreamIsActiveServer) Send(m *IsActiveResponse) error { - return x.ServerStream.SendMsg(m) -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type ExternalScaler_StreamIsActiveServer = grpc.ServerStreamingServer[IsActiveResponse] func _ExternalScaler_GetMetricSpec_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ScaledObjectRef) diff --git a/pkg/scalers/gcp/gcp_stackdriver_client.go b/pkg/scalers/gcp/gcp_stackdriver_client.go index 87893a3979e..4d44356e8ea 100644 --- a/pkg/scalers/gcp/gcp_stackdriver_client.go +++ b/pkg/scalers/gcp/gcp_stackdriver_client.go @@ -101,7 +101,7 @@ func NewStackDriverClientPodIdentity(ctx context.Context) (*StackDriverClient, e // Running workload identity outside GKE, we can't use the metadata api and we need to use the env that it's provided from the hook project, found := os.LookupEnv("CLOUDSDK_CORE_PROJECT") if !found { - project, err = c.ProjectID() + project, err = c.ProjectIDWithContext(ctx) if err != nil { return nil, err } diff --git a/pkg/scalers/liiklus/LiiklusService.pb.go b/pkg/scalers/liiklus/LiiklusService.pb.go index b50ab47173b..a122eb2c88d 100644 --- a/pkg/scalers/liiklus/LiiklusService.pb.go +++ b/pkg/scalers/liiklus/LiiklusService.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.1 -// protoc v5.27.0 +// protoc-gen-go v1.34.2 +// protoc v5.27.3 // source: LiiklusService.proto package liiklus @@ -1056,7 +1056,7 @@ func file_LiiklusService_proto_rawDescGZIP() []byte { var file_LiiklusService_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_LiiklusService_proto_msgTypes = make([]protoimpl.MessageInfo, 15) -var file_LiiklusService_proto_goTypes = []interface{}{ +var file_LiiklusService_proto_goTypes = []any{ (SubscribeRequest_AutoOffsetReset)(0), // 0: com.github.bsideup.liiklus.SubscribeRequest.AutoOffsetReset (*PublishRequest)(nil), // 1: com.github.bsideup.liiklus.PublishRequest (*PublishReply)(nil), // 2: com.github.bsideup.liiklus.PublishReply @@ -1110,7 +1110,7 @@ func file_LiiklusService_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_LiiklusService_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_LiiklusService_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*PublishRequest); i { case 0: return &v.state @@ -1122,7 +1122,7 @@ func file_LiiklusService_proto_init() { return nil } } - file_LiiklusService_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_LiiklusService_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*PublishReply); i { case 0: return &v.state @@ -1134,7 +1134,7 @@ func file_LiiklusService_proto_init() { return nil } } - file_LiiklusService_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_LiiklusService_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*SubscribeRequest); i { case 0: return &v.state @@ -1146,7 +1146,7 @@ func file_LiiklusService_proto_init() { return nil } } - file_LiiklusService_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_LiiklusService_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*Assignment); i { case 0: return &v.state @@ -1158,7 +1158,7 @@ func file_LiiklusService_proto_init() { return nil } } - file_LiiklusService_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_LiiklusService_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*SubscribeReply); i { case 0: return &v.state @@ -1170,7 +1170,7 @@ func file_LiiklusService_proto_init() { return nil } } - file_LiiklusService_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_LiiklusService_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*AckRequest); i { case 0: return &v.state @@ -1182,7 +1182,7 @@ func file_LiiklusService_proto_init() { return nil } } - file_LiiklusService_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_LiiklusService_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*ReceiveRequest); i { case 0: return &v.state @@ -1194,7 +1194,7 @@ func file_LiiklusService_proto_init() { return nil } } - file_LiiklusService_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_LiiklusService_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*ReceiveReply); i { case 0: return &v.state @@ -1206,7 +1206,7 @@ func file_LiiklusService_proto_init() { return nil } } - file_LiiklusService_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_LiiklusService_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*GetOffsetsRequest); i { case 0: return &v.state @@ -1218,7 +1218,7 @@ func file_LiiklusService_proto_init() 
{ return nil } } - file_LiiklusService_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_LiiklusService_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*GetOffsetsReply); i { case 0: return &v.state @@ -1230,7 +1230,7 @@ func file_LiiklusService_proto_init() { return nil } } - file_LiiklusService_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_LiiklusService_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*GetEndOffsetsRequest); i { case 0: return &v.state @@ -1242,7 +1242,7 @@ func file_LiiklusService_proto_init() { return nil } } - file_LiiklusService_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_LiiklusService_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*GetEndOffsetsReply); i { case 0: return &v.state @@ -1254,7 +1254,7 @@ func file_LiiklusService_proto_init() { return nil } } - file_LiiklusService_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_LiiklusService_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*ReceiveReply_Record); i { case 0: return &v.state @@ -1267,10 +1267,10 @@ func file_LiiklusService_proto_init() { } } } - file_LiiklusService_proto_msgTypes[4].OneofWrappers = []interface{}{ + file_LiiklusService_proto_msgTypes[4].OneofWrappers = []any{ (*SubscribeReply_Assignment)(nil), } - file_LiiklusService_proto_msgTypes[7].OneofWrappers = []interface{}{ + file_LiiklusService_proto_msgTypes[7].OneofWrappers = []any{ (*ReceiveReply_Record_)(nil), } type x struct{} diff --git a/pkg/scalers/liiklus/LiiklusService_grpc.pb.go b/pkg/scalers/liiklus/LiiklusService_grpc.pb.go index e24be98970f..2b06740fe4a 100644 --- a/pkg/scalers/liiklus/LiiklusService_grpc.pb.go +++ b/pkg/scalers/liiklus/LiiklusService_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v5.27.0 +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.3 // source: LiiklusService.proto package liiklus @@ -16,8 +16,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 const ( LiiklusService_Publish_FullMethodName = "/com.github.bsideup.liiklus.LiiklusService/Publish" @@ -33,8 +33,8 @@ const ( // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
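The regenerated ExternalScaler stubs above and the Liiklus stubs that follow replace the hand-written per-stream wrapper types with aliases of grpc.ServerStreamingClient and grpc.ServerStreamingServer, so existing callers keep compiling. A short caller-side sketch, assuming KEDA's pkg/scalers/externalscaler import path (illustrative only, not code from this patch):

```go
package example

import (
	"context"
	"errors"
	"fmt"
	"io"

	"github.com/kedacore/keda/v2/pkg/scalers/externalscaler"
)

// watchActivity consumes StreamIsActive; the stream is now typed as
// grpc.ServerStreamingClient[IsActiveResponse], but Recv() behaves as before.
func watchActivity(ctx context.Context, c externalscaler.ExternalScalerClient, ref *externalscaler.ScaledObjectRef) error {
	stream, err := c.StreamIsActive(ctx, ref)
	if err != nil {
		return err
	}
	for {
		resp, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			return nil // server closed the stream cleanly
		}
		if err != nil {
			return err
		}
		fmt.Println("isActive:", resp.Result)
	}
}
```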
type LiiklusServiceClient interface { Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*PublishReply, error) - Subscribe(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (LiiklusService_SubscribeClient, error) - Receive(ctx context.Context, in *ReceiveRequest, opts ...grpc.CallOption) (LiiklusService_ReceiveClient, error) + Subscribe(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[SubscribeReply], error) + Receive(ctx context.Context, in *ReceiveRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ReceiveReply], error) Ack(ctx context.Context, in *AckRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) GetOffsets(ctx context.Context, in *GetOffsetsRequest, opts ...grpc.CallOption) (*GetOffsetsReply, error) GetEndOffsets(ctx context.Context, in *GetEndOffsetsRequest, opts ...grpc.CallOption) (*GetEndOffsetsReply, error) @@ -49,20 +49,22 @@ func NewLiiklusServiceClient(cc grpc.ClientConnInterface) LiiklusServiceClient { } func (c *liiklusServiceClient) Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*PublishReply, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(PublishReply) - err := c.cc.Invoke(ctx, LiiklusService_Publish_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, LiiklusService_Publish_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *liiklusServiceClient) Subscribe(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (LiiklusService_SubscribeClient, error) { - stream, err := c.cc.NewStream(ctx, &LiiklusService_ServiceDesc.Streams[0], LiiklusService_Subscribe_FullMethodName, opts...) +func (c *liiklusServiceClient) Subscribe(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[SubscribeReply], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &LiiklusService_ServiceDesc.Streams[0], LiiklusService_Subscribe_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &liiklusServiceSubscribeClient{stream} + x := &grpc.GenericClientStream[SubscribeRequest, SubscribeReply]{ClientStream: stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -72,29 +74,16 @@ func (c *liiklusServiceClient) Subscribe(ctx context.Context, in *SubscribeReque return x, nil } -type LiiklusService_SubscribeClient interface { - Recv() (*SubscribeReply, error) - grpc.ClientStream -} - -type liiklusServiceSubscribeClient struct { - grpc.ClientStream -} - -func (x *liiklusServiceSubscribeClient) Recv() (*SubscribeReply, error) { - m := new(SubscribeReply) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type LiiklusService_SubscribeClient = grpc.ServerStreamingClient[SubscribeReply] -func (c *liiklusServiceClient) Receive(ctx context.Context, in *ReceiveRequest, opts ...grpc.CallOption) (LiiklusService_ReceiveClient, error) { - stream, err := c.cc.NewStream(ctx, &LiiklusService_ServiceDesc.Streams[1], LiiklusService_Receive_FullMethodName, opts...) 
+func (c *liiklusServiceClient) Receive(ctx context.Context, in *ReceiveRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ReceiveReply], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &LiiklusService_ServiceDesc.Streams[1], LiiklusService_Receive_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &liiklusServiceReceiveClient{stream} + x := &grpc.GenericClientStream[ReceiveRequest, ReceiveReply]{ClientStream: stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -104,26 +93,13 @@ func (c *liiklusServiceClient) Receive(ctx context.Context, in *ReceiveRequest, return x, nil } -type LiiklusService_ReceiveClient interface { - Recv() (*ReceiveReply, error) - grpc.ClientStream -} - -type liiklusServiceReceiveClient struct { - grpc.ClientStream -} - -func (x *liiklusServiceReceiveClient) Recv() (*ReceiveReply, error) { - m := new(ReceiveReply) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type LiiklusService_ReceiveClient = grpc.ServerStreamingClient[ReceiveReply] func (c *liiklusServiceClient) Ack(ctx context.Context, in *AckRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, LiiklusService_Ack_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, LiiklusService_Ack_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -131,8 +107,9 @@ func (c *liiklusServiceClient) Ack(ctx context.Context, in *AckRequest, opts ... } func (c *liiklusServiceClient) GetOffsets(ctx context.Context, in *GetOffsetsRequest, opts ...grpc.CallOption) (*GetOffsetsReply, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetOffsetsReply) - err := c.cc.Invoke(ctx, LiiklusService_GetOffsets_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, LiiklusService_GetOffsets_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -140,8 +117,9 @@ func (c *liiklusServiceClient) GetOffsets(ctx context.Context, in *GetOffsetsReq } func (c *liiklusServiceClient) GetEndOffsets(ctx context.Context, in *GetEndOffsetsRequest, opts ...grpc.CallOption) (*GetEndOffsetsReply, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetEndOffsetsReply) - err := c.cc.Invoke(ctx, LiiklusService_GetEndOffsets_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, LiiklusService_GetEndOffsets_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -150,28 +128,31 @@ func (c *liiklusServiceClient) GetEndOffsets(ctx context.Context, in *GetEndOffs // LiiklusServiceServer is the server API for LiiklusService service. // All implementations must embed UnimplementedLiiklusServiceServer -// for forward compatibility +// for forward compatibility. 
type LiiklusServiceServer interface { Publish(context.Context, *PublishRequest) (*PublishReply, error) - Subscribe(*SubscribeRequest, LiiklusService_SubscribeServer) error - Receive(*ReceiveRequest, LiiklusService_ReceiveServer) error + Subscribe(*SubscribeRequest, grpc.ServerStreamingServer[SubscribeReply]) error + Receive(*ReceiveRequest, grpc.ServerStreamingServer[ReceiveReply]) error Ack(context.Context, *AckRequest) (*emptypb.Empty, error) GetOffsets(context.Context, *GetOffsetsRequest) (*GetOffsetsReply, error) GetEndOffsets(context.Context, *GetEndOffsetsRequest) (*GetEndOffsetsReply, error) mustEmbedUnimplementedLiiklusServiceServer() } -// UnimplementedLiiklusServiceServer must be embedded to have forward compatible implementations. -type UnimplementedLiiklusServiceServer struct { -} +// UnimplementedLiiklusServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedLiiklusServiceServer struct{} func (UnimplementedLiiklusServiceServer) Publish(context.Context, *PublishRequest) (*PublishReply, error) { return nil, status.Errorf(codes.Unimplemented, "method Publish not implemented") } -func (UnimplementedLiiklusServiceServer) Subscribe(*SubscribeRequest, LiiklusService_SubscribeServer) error { +func (UnimplementedLiiklusServiceServer) Subscribe(*SubscribeRequest, grpc.ServerStreamingServer[SubscribeReply]) error { return status.Errorf(codes.Unimplemented, "method Subscribe not implemented") } -func (UnimplementedLiiklusServiceServer) Receive(*ReceiveRequest, LiiklusService_ReceiveServer) error { +func (UnimplementedLiiklusServiceServer) Receive(*ReceiveRequest, grpc.ServerStreamingServer[ReceiveReply]) error { return status.Errorf(codes.Unimplemented, "method Receive not implemented") } func (UnimplementedLiiklusServiceServer) Ack(context.Context, *AckRequest) (*emptypb.Empty, error) { @@ -184,6 +165,7 @@ func (UnimplementedLiiklusServiceServer) GetEndOffsets(context.Context, *GetEndO return nil, status.Errorf(codes.Unimplemented, "method GetEndOffsets not implemented") } func (UnimplementedLiiklusServiceServer) mustEmbedUnimplementedLiiklusServiceServer() {} +func (UnimplementedLiiklusServiceServer) testEmbeddedByValue() {} // UnsafeLiiklusServiceServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to LiiklusServiceServer will @@ -193,6 +175,13 @@ type UnsafeLiiklusServiceServer interface { } func RegisterLiiklusServiceServer(s grpc.ServiceRegistrar, srv LiiklusServiceServer) { + // If the following call pancis, it indicates UnimplementedLiiklusServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&LiiklusService_ServiceDesc, srv) } @@ -219,42 +208,22 @@ func _LiiklusService_Subscribe_Handler(srv interface{}, stream grpc.ServerStream if err := stream.RecvMsg(m); err != nil { return err } - return srv.(LiiklusServiceServer).Subscribe(m, &liiklusServiceSubscribeServer{stream}) + return srv.(LiiklusServiceServer).Subscribe(m, &grpc.GenericServerStream[SubscribeRequest, SubscribeReply]{ServerStream: stream}) } -type LiiklusService_SubscribeServer interface { - Send(*SubscribeReply) error - grpc.ServerStream -} - -type liiklusServiceSubscribeServer struct { - grpc.ServerStream -} - -func (x *liiklusServiceSubscribeServer) Send(m *SubscribeReply) error { - return x.ServerStream.SendMsg(m) -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type LiiklusService_SubscribeServer = grpc.ServerStreamingServer[SubscribeReply] func _LiiklusService_Receive_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(ReceiveRequest) if err := stream.RecvMsg(m); err != nil { return err } - return srv.(LiiklusServiceServer).Receive(m, &liiklusServiceReceiveServer{stream}) -} - -type LiiklusService_ReceiveServer interface { - Send(*ReceiveReply) error - grpc.ServerStream + return srv.(LiiklusServiceServer).Receive(m, &grpc.GenericServerStream[ReceiveRequest, ReceiveReply]{ServerStream: stream}) } -type liiklusServiceReceiveServer struct { - grpc.ServerStream -} - -func (x *liiklusServiceReceiveServer) Send(m *ReceiveReply) error { - return x.ServerStream.SendMsg(m) -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
+type LiiklusService_ReceiveServer = grpc.ServerStreamingServer[ReceiveReply] func _LiiklusService_Ack_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(AckRequest) diff --git a/pkg/scalers/liiklus_scaler.go b/pkg/scalers/liiklus_scaler.go index 2587911209e..9ca9947fd0a 100644 --- a/pkg/scalers/liiklus_scaler.go +++ b/pkg/scalers/liiklus_scaler.go @@ -70,7 +70,7 @@ func NewLiiklusScaler(config *scalersconfig.ScalerConfig) (Scaler, error) { return nil, err } - conn, err := grpc.Dial(lm.address, + conn, err := grpc.NewClient(lm.address, grpc.WithDefaultServiceConfig(grpcConfig), grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { diff --git a/pkg/scalers/nats_jetstream_scaler.go b/pkg/scalers/nats_jetstream_scaler.go index 52f8043b2bd..47a0bbe1f36 100644 --- a/pkg/scalers/nats_jetstream_scaler.go +++ b/pkg/scalers/nats_jetstream_scaler.go @@ -218,20 +218,6 @@ func (s *natsJetStreamScaler) getNATSJetstreamMonitoringData(ctx context.Context } if s.metadata.clusterSize > 1 { - // we know who the consumer leader and its monitoring url is, query it directly - if s.metadata.consumerLeader != "" && s.metadata.monitoringLeaderURL != "" { - natsJetStreamMonitoringLeaderURL := s.metadata.monitoringLeaderURL - - jetStreamAccountResp, err = s.getNATSJetstreamMonitoringRequest(ctx, natsJetStreamMonitoringLeaderURL) - if err != nil { - return err - } - - s.setNATSJetStreamMonitoringData(jetStreamAccountResp, natsJetStreamMonitoringLeaderURL) - return nil - } - - // we haven't found the consumer yet, grab the list of hosts and try each one natsJetStreamMonitoringServerURL, err := s.getNATSJetStreamMonitoringServerURL("") if err != nil { return err diff --git a/pkg/scalers/nats_jetstream_scaler_test.go b/pkg/scalers/nats_jetstream_scaler_test.go index 10312d3873c..c77811011f7 100644 --- a/pkg/scalers/nats_jetstream_scaler_test.go +++ b/pkg/scalers/nats_jetstream_scaler_test.go @@ -440,22 +440,6 @@ func TestNATSJetStreamGetNATSJetstreamServerURL(t *testing.T) { } } -func TestInvalidateNATSJetStreamCachedMonitoringData(t *testing.T) { - meta, err := parseNATSJetStreamMetadata(&scalersconfig.ScalerConfig{TriggerMetadata: testNATSJetStreamGoodMetadata, TriggerIndex: 0}) - if err != nil { - t.Fatal("Could not parse metadata:", err) - } - - mockJetStreamScaler := natsJetStreamScaler{ - stream: nil, - metadata: meta, - httpClient: http.DefaultClient, - logger: InitializeLogger(&scalersconfig.ScalerConfig{TriggerMetadata: testNATSJetStreamGoodMetadata, TriggerIndex: 0}, "nats_jetstream_scaler"), - } - - mockJetStreamScaler.invalidateNATSJetStreamCachedMonitoringData() -} - func TestNATSJetStreamClose(t *testing.T) { mockJetStreamScaler, err := NewNATSJetStreamScaler(&scalersconfig.ScalerConfig{TriggerMetadata: testNATSJetStreamGoodMetadata, TriggerIndex: 0}) if err != nil { diff --git a/pkg/scalers/predictkube_scaler.go b/pkg/scalers/predictkube_scaler.go index a8695e37a79..78e2e5b446c 100644 --- a/pkg/scalers/predictkube_scaler.go +++ b/pkg/scalers/predictkube_scaler.go @@ -124,7 +124,7 @@ func (s *PredictKubeScaler) setupClientConn() error { return err } - connection, err := grpc.Dial(net.JoinHostPort(mlEngineHost, fmt.Sprintf("%d", mlEnginePort)), clientOpt...) + connection, err := grpc.NewClient(net.JoinHostPort(mlEngineHost, fmt.Sprintf("%d", mlEnginePort)), clientOpt...) 
if err != nil { return err } diff --git a/pkg/scaling/resolver/gcp_secretmanager_handler.go b/pkg/scaling/resolver/gcp_secretmanager_handler.go index 6a6cd5c3ef3..d95fdda5de4 100644 --- a/pkg/scaling/resolver/gcp_secretmanager_handler.go +++ b/pkg/scaling/resolver/gcp_secretmanager_handler.go @@ -105,7 +105,7 @@ func (vh *GCPSecretManagerHandler) Initialize(ctx context.Context, client client // and we need to use the env that it's provided from the hook project, found := os.LookupEnv("CLOUDSDK_CORE_PROJECT") if !found { - if project, err = metadata.NewClient(&http.Client{}).ProjectID(); err != nil { + if project, err = metadata.NewClient(&http.Client{}).ProjectIDWithContext(ctx); err != nil { return fmt.Errorf("failed to fetch gcp project id: %w", err) } } diff --git a/pkg/scaling/resolver/hashicorpvault_handler.go b/pkg/scaling/resolver/hashicorpvault_handler.go index 1781a204413..e80b7302cda 100644 --- a/pkg/scaling/resolver/hashicorpvault_handler.go +++ b/pkg/scaling/resolver/hashicorpvault_handler.go @@ -277,7 +277,7 @@ func (vh *HashicorpVaultHandler) getSecretValue(secret *kedav1alpha1.VaultSecret type SecretGroup struct { path string secretType kedav1alpha1.VaultSecretType - vaultPkiData *kedav1alpha1.VaultPkiData + vaultPkiData kedav1alpha1.VaultPkiData } // fetchSecret returns the vaultSecret at a given vault path. If the secret is a pki, then the secret will use the @@ -314,7 +314,7 @@ func (vh *HashicorpVaultHandler) ResolveSecrets(secrets []kedav1alpha1.VaultSecr grouped := make(map[SecretGroup][]kedav1alpha1.VaultSecret) vaultSecrets := make(map[SecretGroup]*vaultapi.Secret) for _, e := range secrets { - group := SecretGroup{secretType: e.Type, path: e.Path, vaultPkiData: &e.PkiData} + group := SecretGroup{secretType: e.Type, path: e.Path, vaultPkiData: e.PkiData} if _, ok := grouped[group]; !ok { grouped[group] = make([]kedav1alpha1.VaultSecret, 0) } @@ -322,7 +322,7 @@ func (vh *HashicorpVaultHandler) ResolveSecrets(secrets []kedav1alpha1.VaultSecr } // For each group fetch the secret from vault for group := range grouped { - vaultSecret, err := vh.fetchSecret(group.secretType, group.path, group.vaultPkiData) + vaultSecret, err := vh.fetchSecret(group.secretType, group.path, &group.vaultPkiData) if err != nil { // could not fetch secret, skipping group continue diff --git a/tests/scalers/mssql/mssql_test.go b/tests/scalers/mssql/mssql_test.go index cb4c3f41495..6285bc1a7ef 100644 --- a/tests/scalers/mssql/mssql_test.go +++ b/tests/scalers/mssql/mssql_test.go @@ -170,7 +170,7 @@ spec: command: - /bin/sh - -c - - "/opt/mssql-tools/bin/sqlcmd -S . -U sa -P '{{.MssqlPassword}}' -Q 'SELECT @@Version'" + - "/opt/mssql-tools18/bin/sqlcmd -S . -C -U sa -P '{{.MssqlPassword}}' -Q 'SELECT @@Version'" ` mssqlServiceTemplate = `apiVersion: v1 @@ -261,12 +261,12 @@ func TestMssqlScaler(t *testing.T) { require.True(t, WaitForStatefulsetReplicaReadyCount(t, kc, mssqlServerName, testNamespace, 1, 60, 3), "replica count should be %d after 3 minutes", 1) - createDatabaseCommand := fmt.Sprintf("/opt/mssql-tools/bin/sqlcmd -S . -U sa -P \"%s\" -Q \"CREATE DATABASE [%s]\"", mssqlPassword, mssqlDatabase) + createDatabaseCommand := fmt.Sprintf("/opt/mssql-tools18/bin/sqlcmd -S . 
-C -U sa -P \"%s\" -Q \"CREATE DATABASE [%s]\"", mssqlPassword, mssqlDatabase) ok, out, errOut, err := WaitForSuccessfulExecCommandOnSpecificPod(t, mssqlServerPodName, testNamespace, createDatabaseCommand, 60, 3) require.True(t, ok, "executing a command on MS SQL Pod should work; Output: %s, ErrorOutput: %s, Error: %s", out, errOut, err) - createTableCommand := fmt.Sprintf("/opt/mssql-tools/bin/sqlcmd -S . -U sa -P \"%s\" -d %s -Q \"CREATE TABLE tasks ([id] int identity primary key, [status] varchar(10))\"", + createTableCommand := fmt.Sprintf("/opt/mssql-tools18/bin/sqlcmd -S . -C -U sa -P \"%s\" -d %s -Q \"CREATE TABLE tasks ([id] int identity primary key, [status] varchar(10))\"", mssqlPassword, mssqlDatabase) ok, out, errOut, err = WaitForSuccessfulExecCommandOnSpecificPod(t, mssqlServerPodName, testNamespace, createTableCommand, 60, 3) require.True(t, ok, "executing a command on MS SQL Pod should work; Output: %s, ErrorOutput: %s, Error: %s", out, errOut, err) diff --git a/tests/scalers/nats_jetstream/helper/nats_helper.go b/tests/scalers/nats_jetstream/helper/nats_helper.go index bbce0aca4e4..bc056d535ba 100644 --- a/tests/scalers/nats_jetstream/helper/nats_helper.go +++ b/tests/scalers/nats_jetstream/helper/nats_helper.go @@ -100,6 +100,28 @@ spec: ] restartPolicy: OnFailure backoffLimit: 4 + ` + + StepDownConsumer = ` +apiVersion: batch/v1 +kind: Job +metadata: + name: step-down + namespace: {{.TestNamespace}} +spec: + ttlSecondsAfterFinished: 15 + template: + spec: + containers: + - name: stepdown + image: "natsio/nats-box:0.13.2" + imagePullPolicy: Always + command: [ + 'sh', '-c', 'nats context save local --server {{.NatsAddress}} --select && + nats consumer cluster step-down {{.NatsStream}} {{.NatsConsumer}}' + ] + restartPolicy: OnFailure + backoffLimit: 4 ` DeploymentTemplate = ` diff --git a/tests/scalers/nats_jetstream/nats_jetstream_cluster/nats_jetstream_cluster_test.go b/tests/scalers/nats_jetstream/nats_jetstream_cluster/nats_jetstream_cluster_test.go index a1cc087872c..87e09af8457 100644 --- a/tests/scalers/nats_jetstream/nats_jetstream_cluster/nats_jetstream_cluster_test.go +++ b/tests/scalers/nats_jetstream/nats_jetstream_cluster/nats_jetstream_cluster_test.go @@ -225,6 +225,10 @@ func testActivation(t *testing.T, kc *k8s.Clientset, data nats.JetStreamDeployme func testScaleOut(t *testing.T, kc *k8s.Clientset, data nats.JetStreamDeploymentTemplateData) { t.Log("--- testing scale out ---") + // We force the change of consumer leader to ensure that KEDA detects the change and + // handles it properly + KubectlApplyWithTemplate(t, data, "stepDownTemplate", nats.StepDownConsumer) + KubectlApplyWithTemplate(t, data, "publishJobTemplate", nats.PublishJobTemplate) assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 60, 3), diff --git a/vendor/cloud.google.com/go/auth/CHANGES.md b/vendor/cloud.google.com/go/auth/CHANGES.md index f1b1a033e88..d1b736c748c 100644 --- a/vendor/cloud.google.com/go/auth/CHANGES.md +++ b/vendor/cloud.google.com/go/auth/CHANGES.md @@ -1,5 +1,93 @@ # Changelog +## [0.7.3](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.2...auth/v0.7.3) (2024-08-01) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758)) +* **auth:** Disable automatic universe domain check for MDS ([#10620](https://github.com/googleapis/google-cloud-go/issues/10620)) 
([7cea5ed](https://github.com/googleapis/google-cloud-go/commit/7cea5edd5a0c1e6bca558696f5607879141910e8)) +* **auth:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758)) + +## [0.7.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.1...auth/v0.7.2) (2024-07-22) + + +### Bug Fixes + +* **auth:** Use default client for universe metadata lookup ([#10551](https://github.com/googleapis/google-cloud-go/issues/10551)) ([d9046fd](https://github.com/googleapis/google-cloud-go/commit/d9046fdd1435d1ce48f374806c1def4cb5ac6cd3)), refs [#10544](https://github.com/googleapis/google-cloud-go/issues/10544) + +## [0.7.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.0...auth/v0.7.1) (2024-07-10) + + +### Bug Fixes + +* **auth:** Bump google.golang.org/grpc@v1.64.1 ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5)) + +## [0.7.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.6.1...auth/v0.7.0) (2024-07-09) + + +### Features + +* **auth:** Add workload X509 cert provider as a default cert provider ([#10479](https://github.com/googleapis/google-cloud-go/issues/10479)) ([c51ee6c](https://github.com/googleapis/google-cloud-go/commit/c51ee6cf65ce05b4d501083e49d468c75ac1ea63)) + + +### Bug Fixes + +* **auth/oauth2adapt:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b)) +* **auth:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b)) +* **auth:** Check len of slices, not non-nil ([#10483](https://github.com/googleapis/google-cloud-go/issues/10483)) ([0a966a1](https://github.com/googleapis/google-cloud-go/commit/0a966a183e5f0e811977216d736d875b7233e942)) + +## [0.6.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.6.0...auth/v0.6.1) (2024-07-01) + + +### Bug Fixes + +* **auth:** Support gRPC API keys ([#10460](https://github.com/googleapis/google-cloud-go/issues/10460)) ([daa6646](https://github.com/googleapis/google-cloud-go/commit/daa6646d2af5d7fb5b30489f4934c7db89868c7c)) +* **auth:** Update http and grpc transports to support token exchange over mTLS ([#10397](https://github.com/googleapis/google-cloud-go/issues/10397)) ([c6dfdcf](https://github.com/googleapis/google-cloud-go/commit/c6dfdcf893c3f971eba15026c12db0a960ae81f2)) + +## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.5.2...auth/v0.6.0) (2024-06-25) + + +### Features + +* **auth:** Add non-blocking token refresh for compute MDS ([#10263](https://github.com/googleapis/google-cloud-go/issues/10263)) ([9ac350d](https://github.com/googleapis/google-cloud-go/commit/9ac350da11a49b8e2174d3fc5b1a5070fec78b4e)) + + +### Bug Fixes + +* **auth:** Return error if envvar detected file returns an error ([#10431](https://github.com/googleapis/google-cloud-go/issues/10431)) ([e52b9a7](https://github.com/googleapis/google-cloud-go/commit/e52b9a7c45468827f5d220ab00965191faeb9d05)) + +## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.5.1...auth/v0.5.2) (2024-06-24) + + +### Bug Fixes + +* **auth:** Fetch initial token when CachedTokenProviderOptions.DisableAutoRefresh is true ([#10415](https://github.com/googleapis/google-cloud-go/issues/10415)) ([3266763](https://github.com/googleapis/google-cloud-go/commit/32667635ca2efad05cd8c087c004ca07d7406913)), 
refs [#10414](https://github.com/googleapis/google-cloud-go/issues/10414) + +## [0.5.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.5.0...auth/v0.5.1) (2024-05-31) + + +### Bug Fixes + +* **auth:** Pass through client to 2LO and 3LO flows ([#10290](https://github.com/googleapis/google-cloud-go/issues/10290)) ([685784e](https://github.com/googleapis/google-cloud-go/commit/685784ea84358c15e9214bdecb307d37aa3b6d2f)) + +## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.4.2...auth/v0.5.0) (2024-05-28) + + +### Features + +* **auth:** Adds X509 workload certificate provider ([#10233](https://github.com/googleapis/google-cloud-go/issues/10233)) ([17a9db7](https://github.com/googleapis/google-cloud-go/commit/17a9db73af35e3d1a7a25ac4fd1377a103de6150)) + +## [0.4.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.4.1...auth/v0.4.2) (2024-05-16) + + +### Bug Fixes + +* **auth:** Enable client certificates by default only for GDU ([#10151](https://github.com/googleapis/google-cloud-go/issues/10151)) ([7c52978](https://github.com/googleapis/google-cloud-go/commit/7c529786275a39b7e00525f7d5e7be0d963e9e15)) +* **auth:** Handle non-Transport DefaultTransport ([#10162](https://github.com/googleapis/google-cloud-go/issues/10162)) ([fa3bfdb](https://github.com/googleapis/google-cloud-go/commit/fa3bfdb23aaa45b34394a8b61e753b3587506782)), refs [#10159](https://github.com/googleapis/google-cloud-go/issues/10159) +* **auth:** Have refresh time match docs ([#10147](https://github.com/googleapis/google-cloud-go/issues/10147)) ([bcb5568](https://github.com/googleapis/google-cloud-go/commit/bcb5568c07a54dd3d2e869d15f502b0741a609e8)) +* **auth:** Update compute token fetching error with named prefix ([#10180](https://github.com/googleapis/google-cloud-go/issues/10180)) ([4573504](https://github.com/googleapis/google-cloud-go/commit/4573504828d2928bebedc875d87650ba227829ea)) + ## [0.4.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.4.0...auth/v0.4.1) (2024-05-09) diff --git a/vendor/cloud.google.com/go/auth/auth.go b/vendor/cloud.google.com/go/auth/auth.go index ea7c1b0ad8d..2a4350af3f1 100644 --- a/vendor/cloud.google.com/go/auth/auth.go +++ b/vendor/cloud.google.com/go/auth/auth.go @@ -39,11 +39,26 @@ const ( // 3 minutes and 45 seconds before expiration. The shortest MDS cache is 4 minutes, // so we give it 15 seconds to refresh it's cache before attempting to refresh a token. - defaultExpiryDelta = 215 * time.Second + defaultExpiryDelta = 225 * time.Second universeDomainDefault = "googleapis.com" ) +// tokenState represents different states for a [Token]. +type tokenState int + +const ( + // fresh indicates that the [Token] is valid. It is not expired or close to + // expired, or the token has no expiry. + fresh tokenState = iota + // stale indicates that the [Token] is close to expired, and should be + // refreshed. The token can be used normally. + stale + // invalid indicates that the [Token] is expired or invalid. The token + // cannot be used for a normal operation. + invalid +) + var ( defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" defaultHeader = &jwt.Header{Algorithm: jwt.HeaderAlgRSA256, Type: jwt.HeaderType} @@ -81,13 +96,27 @@ type Token struct { // IsValid reports that a [Token] is non-nil, has a [Token.Value], and has not // expired. A token is considered expired if [Token.Expiry] has passed or will -// pass in the next 10 seconds. +// pass in the next 225 seconds. 
func (t *Token) IsValid() bool { return t.isValidWithEarlyExpiry(defaultExpiryDelta) } +// MetadataString is a convenience method for accessing string values in the +// token's metadata. Returns an empty string if the metadata is nil or the value +// for the given key cannot be cast to a string. +func (t *Token) MetadataString(k string) string { + if t.Metadata == nil { + return "" + } + s, ok := t.Metadata[k].(string) + if !ok { + return "" + } + return s +} + func (t *Token) isValidWithEarlyExpiry(earlyExpiry time.Duration) bool { - if t == nil || t.Value == "" { + if t.isEmpty() { return false } if t.Expiry.IsZero() { @@ -96,6 +125,10 @@ func (t *Token) isValidWithEarlyExpiry(earlyExpiry time.Duration) bool { return !t.Expiry.Round(0).Add(-earlyExpiry).Before(timeNow()) } +func (t *Token) isEmpty() bool { + return t == nil || t.Value == "" +} + // Credentials holds Google credentials, including // [Application Default Credentials](https://developers.google.com/accounts/docs/application-default-credentials). type Credentials struct { @@ -206,11 +239,15 @@ func NewCredentials(opts *CredentialsOptions) *Credentials { // CachedTokenProvider. type CachedTokenProviderOptions struct { // DisableAutoRefresh makes the TokenProvider always return the same token, - // even if it is expired. + // even if it is expired. The default is false. Optional. DisableAutoRefresh bool // ExpireEarly configures the amount of time before a token expires, that it - // should be refreshed. If unset, the default value is 10 seconds. + // should be refreshed. If unset, the default value is 3 minutes and 45 + // seconds. Optional. ExpireEarly time.Duration + // DisableAsyncRefresh configures a synchronous workflow that refreshes + // stale tokens while blocking. The default is false. Optional. + DisableAsyncRefresh bool } func (ctpo *CachedTokenProviderOptions) autoRefresh() bool { @@ -227,34 +264,126 @@ func (ctpo *CachedTokenProviderOptions) expireEarly() time.Duration { return ctpo.ExpireEarly } +func (ctpo *CachedTokenProviderOptions) blockingRefresh() bool { + if ctpo == nil { + return false + } + return ctpo.DisableAsyncRefresh +} + // NewCachedTokenProvider wraps a [TokenProvider] to cache the tokens returned -// by the underlying provider. By default it will refresh tokens ten seconds -// before they expire, but this time can be configured with the optional -// options. +// by the underlying provider. By default it will refresh tokens asynchronously +// (non-blocking mode) within a window that starts 3 minutes and 45 seconds +// before they expire. The asynchronous (non-blocking) refresh can be changed to +// a synchronous (blocking) refresh using the +// CachedTokenProviderOptions.DisableAsyncRefresh option. The time-before-expiry +// duration can be configured using the CachedTokenProviderOptions.ExpireEarly +// option. 
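The vendored doc comment above describes the new default behaviour: once a cached token enters the stale window it is refreshed in the background while the cached value keeps being served. A usage sketch, assuming some existing baseProvider that implements auth.TokenProvider (illustrative, not vendored code):

```go
package example

import (
	"context"
	"time"

	"cloud.google.com/go/auth"
)

func cachedToken(ctx context.Context, baseProvider auth.TokenProvider) (*auth.Token, error) {
	tp := auth.NewCachedTokenProvider(baseProvider, &auth.CachedTokenProviderOptions{
		ExpireEarly:         4 * time.Minute, // widen the stale window (default is 3m45s)
		DisableAsyncRefresh: false,           // keep the new non-blocking refresh
	})
	// While the token is merely stale this returns the cached value immediately
	// and refreshes it in a background goroutine; once invalid it blocks and refreshes.
	return tp.Token(ctx)
}
```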
func NewCachedTokenProvider(tp TokenProvider, opts *CachedTokenProviderOptions) TokenProvider { if ctp, ok := tp.(*cachedTokenProvider); ok { return ctp } return &cachedTokenProvider{ - tp: tp, - autoRefresh: opts.autoRefresh(), - expireEarly: opts.expireEarly(), + tp: tp, + autoRefresh: opts.autoRefresh(), + expireEarly: opts.expireEarly(), + blockingRefresh: opts.blockingRefresh(), } } type cachedTokenProvider struct { - tp TokenProvider - autoRefresh bool - expireEarly time.Duration + tp TokenProvider + autoRefresh bool + expireEarly time.Duration + blockingRefresh bool mu sync.Mutex cachedToken *Token + // isRefreshRunning ensures that the non-blocking refresh will only be + // attempted once, even if multiple callers enter the Token method. + isRefreshRunning bool + // isRefreshErr ensures that the non-blocking refresh will only be attempted + // once per refresh window if an error is encountered. + isRefreshErr bool } func (c *cachedTokenProvider) Token(ctx context.Context) (*Token, error) { + if c.blockingRefresh { + return c.tokenBlocking(ctx) + } + return c.tokenNonBlocking(ctx) +} + +func (c *cachedTokenProvider) tokenNonBlocking(ctx context.Context) (*Token, error) { + switch c.tokenState() { + case fresh: + c.mu.Lock() + defer c.mu.Unlock() + return c.cachedToken, nil + case stale: + c.tokenAsync(ctx) + // Return the stale token immediately to not block customer requests to Cloud services. + c.mu.Lock() + defer c.mu.Unlock() + return c.cachedToken, nil + default: // invalid + return c.tokenBlocking(ctx) + } +} + +// tokenState reports the token's validity. +func (c *cachedTokenProvider) tokenState() tokenState { c.mu.Lock() defer c.mu.Unlock() - if c.cachedToken.IsValid() || !c.autoRefresh { + t := c.cachedToken + if t == nil || t.Value == "" { + return invalid + } else if t.Expiry.IsZero() { + return fresh + } else if timeNow().After(t.Expiry.Round(0)) { + return invalid + } else if timeNow().After(t.Expiry.Round(0).Add(-c.expireEarly)) { + return stale + } + return fresh +} + +// tokenAsync uses a bool to ensure that only one non-blocking token refresh +// happens at a time, even if multiple callers have entered this function +// concurrently. This avoids creating an arbitrary number of concurrent +// goroutines. Retries should be attempted and managed within the Token method. +// If the refresh attempt fails, no further attempts are made until the refresh +// window expires and the token enters the invalid state, at which point the +// blocking call to Token should likely return the same error on the main goroutine. +func (c *cachedTokenProvider) tokenAsync(ctx context.Context) { + fn := func() { + c.mu.Lock() + c.isRefreshRunning = true + c.mu.Unlock() + t, err := c.tp.Token(ctx) + c.mu.Lock() + defer c.mu.Unlock() + c.isRefreshRunning = false + if err != nil { + // Discard errors from the non-blocking refresh, but prevent further + // attempts. 
+ c.isRefreshErr = true + return + } + c.cachedToken = t + } + c.mu.Lock() + defer c.mu.Unlock() + if !c.isRefreshRunning && !c.isRefreshErr { + go fn() + } +} + +func (c *cachedTokenProvider) tokenBlocking(ctx context.Context) (*Token, error) { + c.mu.Lock() + defer c.mu.Unlock() + c.isRefreshErr = false + if c.cachedToken.IsValid() || (!c.autoRefresh && !c.cachedToken.isEmpty()) { return c.cachedToken, nil } t, err := c.tp.Token(ctx) @@ -423,12 +552,12 @@ func (tp tokenProvider2LO) Token(ctx context.Context) (*Token, error) { v := url.Values{} v.Set("grant_type", defaultGrantType) v.Set("assertion", payload) - resp, err := tp.Client.PostForm(tp.opts.TokenURL, v) + req, err := http.NewRequestWithContext(ctx, "POST", tp.opts.TokenURL, strings.NewReader(v.Encode())) if err != nil { - return nil, fmt.Errorf("auth: cannot fetch token: %w", err) + return nil, err } - defer resp.Body.Close() - body, err := internal.ReadAll(resp.Body) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + resp, body, err := internal.DoRequest(tp.Client, req) if err != nil { return nil, fmt.Errorf("auth: cannot fetch token: %w", err) } diff --git a/vendor/cloud.google.com/go/auth/credentials/compute.go b/vendor/cloud.google.com/go/auth/credentials/compute.go index 6db643837e2..6f70fa353b0 100644 --- a/vendor/cloud.google.com/go/auth/credentials/compute.go +++ b/vendor/cloud.google.com/go/auth/credentials/compute.go @@ -37,9 +37,10 @@ var ( // computeTokenProvider creates a [cloud.google.com/go/auth.TokenProvider] that // uses the metadata service to retrieve tokens. -func computeTokenProvider(earlyExpiry time.Duration, scope ...string) auth.TokenProvider { - return auth.NewCachedTokenProvider(computeProvider{scopes: scope}, &auth.CachedTokenProviderOptions{ - ExpireEarly: earlyExpiry, +func computeTokenProvider(opts *DetectOptions) auth.TokenProvider { + return auth.NewCachedTokenProvider(computeProvider{scopes: opts.Scopes}, &auth.CachedTokenProviderOptions{ + ExpireEarly: opts.EarlyTokenRefresh, + DisableAsyncRefresh: opts.DisableAsyncRefresh, }) } @@ -64,9 +65,9 @@ func (cs computeProvider) Token(ctx context.Context) (*auth.Token, error) { v.Set("scopes", strings.Join(cs.scopes, ",")) tokenURI.RawQuery = v.Encode() } - tokenJSON, err := metadata.Get(tokenURI.String()) + tokenJSON, err := metadata.GetWithContext(ctx, tokenURI.String()) if err != nil { - return nil, err + return nil, fmt.Errorf("credentials: cannot fetch token: %w", err) } var res metadataTokenResp if err := json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res); err != nil { diff --git a/vendor/cloud.google.com/go/auth/credentials/detect.go b/vendor/cloud.google.com/go/auth/credentials/detect.go index cb3f44f5873..2d9a73edf36 100644 --- a/vendor/cloud.google.com/go/auth/credentials/detect.go +++ b/vendor/cloud.google.com/go/auth/credentials/detect.go @@ -37,6 +37,9 @@ const ( googleAuthURL = "https://accounts.google.com/o/oauth2/auth" googleTokenURL = "https://oauth2.googleapis.com/token" + // GoogleMTLSTokenURL is Google's default OAuth2.0 mTLS endpoint. 
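The switch from Client.PostForm to http.NewRequestWithContext is about propagating the caller's context. A minimal standard-library sketch of the same pattern, with placeholder URL and form values:

package main

import (
    "context"
    "fmt"
    "io"
    "net/http"
    "net/url"
    "strings"
    "time"
)

func main() {
    // Context-aware replacement for client.PostForm: the request is cancelled
    // if ctx expires. URL and form values are placeholders.
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    v := url.Values{}
    v.Set("grant_type", "example-grant")
    v.Set("assertion", "example-jwt")

    req, err := http.NewRequestWithContext(ctx, "POST", "https://example.com/token", strings.NewReader(v.Encode()))
    if err != nil {
        fmt.Println(err)
        return
    }
    req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        fmt.Println(err)
        return
    }
    defer resp.Body.Close()
    body, _ := io.ReadAll(resp.Body)
    fmt.Println(resp.StatusCode, len(body))
}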
+ GoogleMTLSTokenURL = "https://oauth2.mtls.googleapis.com/token" + // Help on default credentials adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc" ) @@ -73,16 +76,18 @@ func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) { if err := opts.validate(); err != nil { return nil, err } - if opts.CredentialsJSON != nil { + if len(opts.CredentialsJSON) > 0 { return readCredentialsFileJSON(opts.CredentialsJSON, opts) } if opts.CredentialsFile != "" { return readCredentialsFile(opts.CredentialsFile, opts) } if filename := os.Getenv(credsfile.GoogleAppCredsEnvVar); filename != "" { - if creds, err := readCredentialsFile(filename, opts); err == nil { - return creds, err + creds, err := readCredentialsFile(filename, opts) + if err != nil { + return nil, err } + return creds, nil } fileName := credsfile.GetWellKnownFileName() @@ -92,7 +97,7 @@ func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) { if OnGCE() { return auth.NewCredentials(&auth.CredentialsOptions{ - TokenProvider: computeTokenProvider(opts.EarlyTokenRefresh, opts.Scopes...), + TokenProvider: computeTokenProvider(opts), ProjectIDProvider: auth.CredentialsPropertyFunc(func(context.Context) (string, error) { return metadata.ProjectID() }), @@ -116,8 +121,13 @@ type DetectOptions struct { // Optional. Subject string // EarlyTokenRefresh configures how early before a token expires that it - // should be refreshed. + // should be refreshed. Once the token’s time until expiration has entered + // this refresh window the token is considered valid but stale. If unset, + // the default value is 3 minutes and 45 seconds. Optional. EarlyTokenRefresh time.Duration + // DisableAsyncRefresh configures a synchronous workflow that refreshes + // stale tokens while blocking. The default is false. Optional. + DisableAsyncRefresh bool // AuthHandlerOptions configures an authorization handler and other options // for 3LO flows. It is required, and only used, for client credential // flows. diff --git a/vendor/cloud.google.com/go/auth/credentials/filetypes.go b/vendor/cloud.google.com/go/auth/credentials/filetypes.go index a66e56d70f8..fe93557389d 100644 --- a/vendor/cloud.google.com/go/auth/credentials/filetypes.go +++ b/vendor/cloud.google.com/go/auth/credentials/filetypes.go @@ -137,6 +137,7 @@ func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions) Scopes: opts.scopes(), TokenURL: f.TokenURL, Subject: opts.Subject, + Client: opts.client(), } if opts2LO.TokenURL == "" { opts2LO.TokenURL = jwtTokenURL @@ -154,6 +155,7 @@ func handleUserCredential(f *credsfile.UserCredentialsFile, opts *DetectOptions) AuthStyle: auth.StyleInParams, EarlyTokenExpiry: opts.EarlyTokenRefresh, RefreshToken: f.RefreshToken, + Client: opts.client(), } return auth.New3LOTokenProvider(opts3LO) } diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go index d9e1dcddf64..a34f6b06f84 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go @@ -122,7 +122,7 @@ func (sp *awsSubjectProvider) subjectToken(ctx context.Context) (string, error) // Generate the signed request to AWS STS GetCallerIdentity API. // Use the required regional endpoint. Otherwise, the request will fail. 
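Assuming the public credentials package as vendored here, detection with the new DisableAsyncRefresh and EarlyTokenRefresh knobs might look like the sketch below; the scope and durations are illustrative.

package main

import (
    "context"
    "fmt"
    "time"

    "cloud.google.com/go/auth/credentials"
)

func main() {
    // Detect Application Default Credentials and opt into blocking refreshes
    // with a custom refresh window.
    creds, err := credentials.DetectDefault(&credentials.DetectOptions{
        Scopes:              []string{"https://www.googleapis.com/auth/cloud-platform"},
        EarlyTokenRefresh:   2 * time.Minute,
        DisableAsyncRefresh: true,
    })
    if err != nil {
        fmt.Println("detect:", err)
        return
    }

    tok, err := creds.Token(context.Background())
    if err != nil {
        fmt.Println("token:", err)
        return
    }
    fmt.Println("expires:", tok.Expiry)
}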
- req, err := http.NewRequest("POST", strings.Replace(sp.RegionalCredVerificationURL, "{region}", sp.region, 1), nil) + req, err := http.NewRequestWithContext(ctx, "POST", strings.Replace(sp.RegionalCredVerificationURL, "{region}", sp.region, 1), nil) if err != nil { return "", err } @@ -194,20 +194,14 @@ func (sp *awsSubjectProvider) getAWSSessionToken(ctx context.Context) (string, e } req.Header.Set(awsIMDSv2SessionTTLHeader, awsIMDSv2SessionTTL) - resp, err := sp.Client.Do(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - - respBody, err := internal.ReadAll(resp.Body) + resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return "", err } if resp.StatusCode != http.StatusOK { - return "", fmt.Errorf("credentials: unable to retrieve AWS session token: %s", respBody) + return "", fmt.Errorf("credentials: unable to retrieve AWS session token: %s", body) } - return string(respBody), nil + return string(body), nil } func (sp *awsSubjectProvider) getRegion(ctx context.Context, headers map[string]string) (string, error) { @@ -233,29 +227,21 @@ func (sp *awsSubjectProvider) getRegion(ctx context.Context, headers map[string] for name, value := range headers { req.Header.Add(name, value) } - - resp, err := sp.Client.Do(req) + resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return "", err } - defer resp.Body.Close() - - respBody, err := internal.ReadAll(resp.Body) - if err != nil { - return "", err - } - if resp.StatusCode != http.StatusOK { - return "", fmt.Errorf("credentials: unable to retrieve AWS region - %s", respBody) + return "", fmt.Errorf("credentials: unable to retrieve AWS region - %s", body) } // This endpoint will return the region in format: us-east-2b. // Only the us-east-2 part should be used. 
- bodyLen := len(respBody) + bodyLen := len(body) if bodyLen == 0 { return "", nil } - return string(respBody[:bodyLen-1]), nil + return string(body[:bodyLen-1]), nil } func (sp *awsSubjectProvider) getSecurityCredentials(ctx context.Context, headers map[string]string) (result *AwsSecurityCredentials, err error) { @@ -299,22 +285,17 @@ func (sp *awsSubjectProvider) getMetadataSecurityCredentials(ctx context.Context for name, value := range headers { req.Header.Add(name, value) } - - resp, err := sp.Client.Do(req) - if err != nil { - return result, err - } - defer resp.Body.Close() - - respBody, err := internal.ReadAll(resp.Body) + resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return result, err } if resp.StatusCode != http.StatusOK { - return result, fmt.Errorf("credentials: unable to retrieve AWS security credentials - %s", respBody) + return result, fmt.Errorf("credentials: unable to retrieve AWS security credentials - %s", body) + } + if err := json.Unmarshal(body, &result); err != nil { + return nil, err } - err = json.Unmarshal(respBody, &result) - return result, err + return result, nil } func (sp *awsSubjectProvider) getMetadataRoleName(ctx context.Context, headers map[string]string) (string, error) { @@ -329,20 +310,14 @@ func (sp *awsSubjectProvider) getMetadataRoleName(ctx context.Context, headers m req.Header.Add(name, value) } - resp, err := sp.Client.Do(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - - respBody, err := internal.ReadAll(resp.Body) + resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return "", err } if resp.StatusCode != http.StatusOK { - return "", fmt.Errorf("credentials: unable to retrieve AWS role name - %s", respBody) + return "", fmt.Errorf("credentials: unable to retrieve AWS role name - %s", body) } - return string(respBody), nil + return string(body), nil } // awsRequestSigner is a utility class to sign http requests using a AWS V4 signature. 
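The region derivation above drops the trailing availability-zone letter; a tiny standalone sketch (helper name is hypothetical):

package main

import "fmt"

// regionFromZone mirrors the trimming done above: the IMDS placement endpoint
// returns an availability zone such as "us-east-2b", and the region is that
// string minus its final letter.
func regionFromZone(zone string) string {
    if zone == "" {
        return ""
    }
    return zone[:len(zone)-1]
}

func main() {
    fmt.Println(regionFromZone("us-east-2b")) // us-east-2
}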
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go index 22b8af1c11b..e33d35a2687 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go @@ -48,27 +48,21 @@ func (sp *urlSubjectProvider) subjectToken(ctx context.Context) (string, error) for key, val := range sp.Headers { req.Header.Add(key, val) } - resp, err := sp.Client.Do(req) + resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return "", fmt.Errorf("credentials: invalid response when retrieving subject token: %w", err) } - defer resp.Body.Close() - - respBody, err := internal.ReadAll(resp.Body) - if err != nil { - return "", fmt.Errorf("credentials: invalid body in subject token URL query: %w", err) - } if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { - return "", fmt.Errorf("credentials: status code %d: %s", c, respBody) + return "", fmt.Errorf("credentials: status code %d: %s", c, body) } if sp.Format == nil { - return string(respBody), nil + return string(body), nil } switch sp.Format.Type { case "json": jsonData := make(map[string]interface{}) - err = json.Unmarshal(respBody, &jsonData) + err = json.Unmarshal(body, &jsonData) if err != nil { return "", fmt.Errorf("credentials: failed to unmarshal subject token file: %w", err) } @@ -82,7 +76,7 @@ func (sp *urlSubjectProvider) subjectToken(ctx context.Context) (string, error) } return token, nil case fileTypeText: - return string(respBody), nil + return string(body), nil default: return "", errors.New("credentials: invalid credential_source file format type: " + sp.Format.Type) } diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go index 467edb9088e..720045d3b07 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go @@ -25,6 +25,7 @@ import ( "net/http" "net/url" "os" + "strings" "time" "cloud.google.com/go/auth" @@ -129,12 +130,13 @@ func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) { v.Set("requested_token_type", requestTokenType) v.Set("subject_token", payload) v.Set("subject_token_type", subjectTokenType) - resp, err := g.client.PostForm(g.tokenURL, v) + + req, err := http.NewRequestWithContext(ctx, "POST", g.tokenURL, strings.NewReader(v.Encode())) if err != nil { - return nil, fmt.Errorf("credentials: cannot fetch token: %w", err) + return nil, err } - defer resp.Body.Close() - body, err := internal.ReadAll(resp.Body) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + resp, body, err := internal.DoRequest(g.client, req) if err != nil { return nil, fmt.Errorf("credentials: cannot fetch token: %w", err) } diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go index 3ceab873b8e..ed53afa519e 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go @@ -109,15 +109,10 @@ func (o *Options) Token(ctx context.Context) (*auth.Token, error) { if err := setAuthHeader(ctx, o.Tp, req); err != nil { return nil, err 
} - resp, err := o.Client.Do(req) + resp, body, err := internal.DoRequest(o.Client, req) if err != nil { return nil, fmt.Errorf("credentials: unable to generate access token: %w", err) } - defer resp.Body.Close() - body, err := internal.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("credentials: unable to read body: %w", err) - } if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { return nil, fmt.Errorf("credentials: status code %d: %s", c, body) } diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go index f70e0aef48f..768a9dafc13 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go @@ -93,16 +93,10 @@ func doRequest(ctx context.Context, opts *Options, data url.Values) (*TokenRespo } req.Header.Set("Content-Length", strconv.Itoa(len(encodedData))) - resp, err := opts.Client.Do(req) + resp, body, err := internal.DoRequest(opts.Client, req) if err != nil { return nil, fmt.Errorf("credentials: invalid response from Secure Token Server: %w", err) } - defer resp.Body.Close() - - body, err := internal.ReadAll(resp.Body) - if err != nil { - return nil, err - } if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices { return nil, fmt.Errorf("credentials: status code %d: %s", c, body) } diff --git a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go index 8dbfa7ef7e9..efc91c2b0c3 100644 --- a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go +++ b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go @@ -66,10 +66,10 @@ func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, _ *Options) bool if tok == nil { return false } - if source, _ := tok.Metadata["auth.google.tokenSource"].(string); source != "compute-metadata" { + if tok.MetadataString("auth.google.tokenSource") != "compute-metadata" { return false } - if acct, _ := tok.Metadata["auth.google.serviceAccount"].(string); acct != "default" { + if tok.MetadataString("auth.google.serviceAccount") != "default" { return false } return true diff --git a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go index 81c956b030b..0442a5938a8 100644 --- a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go +++ b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go @@ -16,6 +16,7 @@ package grpctransport import ( "context" + "crypto/tls" "errors" "fmt" "net/http" @@ -45,9 +46,14 @@ var ( timeoutDialerOption grpc.DialOption ) +// ClientCertProvider is a function that returns a TLS client certificate to be +// used when opening TLS connections. It follows the same semantics as +// [crypto/tls.Config.GetClientCertificate]. +type ClientCertProvider = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + // Options used to configure a [GRPCClientConnPool] from [Dial]. type Options struct { - // DisableTelemetry disables default telemetry (OpenCensus). An example + // DisableTelemetry disables default telemetry (OpenTelemetry). An example // reason to do so would be to bind custom telemetry that overrides the // defaults. DisableTelemetry bool @@ -69,6 +75,10 @@ type Options struct { // Credentials used to add Authorization metadata to all requests. 
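The directpath change above swaps manual type assertions on Token.Metadata for the new MetadataString helper; a small sketch assuming the vendored auth package:

package main

import (
    "fmt"

    "cloud.google.com/go/auth"
)

func main() {
    tok := &auth.Token{
        Value: "example",
        Metadata: map[string]interface{}{
            "auth.google.tokenSource": "compute-metadata",
        },
    }

    // Old pattern: type-assert directly on the Metadata map.
    source, _ := tok.Metadata["auth.google.tokenSource"].(string)

    // New pattern: MetadataString hides the nil-map and failed-assertion cases.
    fmt.Println(source == tok.MetadataString("auth.google.tokenSource")) // true
    fmt.Println(tok.MetadataString("missing-key") == "")                 // true
}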
If set // DetectOpts are ignored. Credentials *auth.Credentials + // ClientCertProvider is a function that returns a TLS client certificate to + // be used when opening TLS connections. It follows the same semantics as + // crypto/tls.Config.GetClientCertificate. + ClientCertProvider ClientCertProvider // DetectOpts configures settings for detect Application Default // Credentials. DetectOpts *credentials.DetectOptions @@ -77,6 +87,9 @@ type Options struct { // configured for the client, which will be compared to the universe domain // that is separately configured for the credentials. UniverseDomain string + // APIKey specifies an API key to be used as the basis for authentication. + // If set DetectOpts are ignored. + APIKey string // InternalOptions are NOT meant to be set directly by consumers of this // package, they should only be set by generated client code. @@ -99,7 +112,8 @@ func (o *Options) validate() error { if o.InternalOptions != nil && o.InternalOptions.SkipValidation { return nil } - hasCreds := o.Credentials != nil || + hasCreds := o.APIKey != "" || + o.Credentials != nil || (o.DetectOpts != nil && len(o.DetectOpts.CredentialsJSON) > 0) || (o.DetectOpts != nil && o.DetectOpts.CredentialsFile != "") if o.DisableAuthentication && hasCreds { @@ -125,6 +139,13 @@ func (o *Options) resolveDetectOptions() *credentials.DetectOptions { if len(do.Scopes) == 0 && do.Audience == "" && io != nil { do.Audience = o.InternalOptions.DefaultAudience } + if o.ClientCertProvider != nil { + tlsConfig := &tls.Config{ + GetClientCertificate: o.ClientCertProvider, + } + do.Client = transport.DefaultHTTPClientWithTLS(tlsConfig) + do.TokenURL = credentials.GoogleMTLSTokenURL + } return do } @@ -189,9 +210,10 @@ func Dial(ctx context.Context, secure bool, opts *Options) (GRPCClientConnPool, // return a GRPCClientConnPool if pool == 1 or else a pool of of them if >1 func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, error) { tOpts := &transport.Options{ - Endpoint: opts.Endpoint, - Client: opts.client(), - UniverseDomain: opts.UniverseDomain, + Endpoint: opts.Endpoint, + ClientCertProvider: opts.ClientCertProvider, + Client: opts.client(), + UniverseDomain: opts.UniverseDomain, } if io := opts.InternalOptions; io != nil { tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate @@ -213,8 +235,21 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er grpc.WithTransportCredentials(transportCreds), } - // Authentication can only be sent when communicating over a secure connection. - if !opts.DisableAuthentication { + // Ensure the token exchange HTTP transport uses the same ClientCertProvider as the GRPC API transport. + opts.ClientCertProvider, err = transport.GetClientCertificateProvider(tOpts) + if err != nil { + return nil, err + } + + if opts.APIKey != "" { + grpcOpts = append(grpcOpts, + grpc.WithPerRPCCredentials(&grpcKeyProvider{ + apiKey: opts.APIKey, + metadata: opts.Metadata, + secure: secure, + }), + ) + } else if !opts.DisableAuthentication { metadata := opts.Metadata var creds *auth.Credentials @@ -259,6 +294,26 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er return grpc.DialContext(ctx, endpoint, grpcOpts...) } +// grpcKeyProvider satisfies https://pkg.go.dev/google.golang.org/grpc/credentials#PerRPCCredentials. 
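Assuming the vendored grpctransport package, a caller (normally generated client code) might exercise the new APIKey option roughly as below; the endpoint, key, and pool handling are illustrative, not a definitive use of the API.

package main

import (
    "context"
    "fmt"

    "cloud.google.com/go/auth/grpctransport"
)

func main() {
    // Dial a gRPC endpoint authenticating with an API key instead of detected
    // credentials; with APIKey set, DetectOpts are ignored.
    pool, err := grpctransport.Dial(context.Background(), true, &grpctransport.Options{
        Endpoint: "example.googleapis.com:443",
        APIKey:   "YOUR_API_KEY", // placeholder
    })
    if err != nil {
        fmt.Println("dial:", err)
        return
    }
    defer pool.Close()
    fmt.Println("dialed example.googleapis.com:443 with API-key credentials")
}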
+type grpcKeyProvider struct { + apiKey string + metadata map[string]string + secure bool +} + +func (g *grpcKeyProvider) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + metadata := make(map[string]string, len(g.metadata)+1) + metadata["X-goog-api-key"] = g.apiKey + for k, v := range g.metadata { + metadata[k] = v + } + return metadata, nil +} + +func (g *grpcKeyProvider) RequireTransportSecurity() bool { + return g.secure +} + // grpcCredentialsProvider satisfies https://pkg.go.dev/google.golang.org/grpc/credentials#PerRPCCredentials. type grpcCredentialsProvider struct { creds *auth.Credentials @@ -282,17 +337,19 @@ func (c *grpcCredentialsProvider) getClientUniverseDomain() string { } func (c *grpcCredentialsProvider) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { - credentialsUniverseDomain, err := c.creds.UniverseDomain(ctx) - if err != nil { - return nil, err - } - if err := transport.ValidateUniverseDomain(c.getClientUniverseDomain(), credentialsUniverseDomain); err != nil { - return nil, err - } token, err := c.creds.Token(ctx) if err != nil { return nil, err } + if token.MetadataString("auth.google.tokenSource") != "compute-metadata" { + credentialsUniverseDomain, err := c.creds.UniverseDomain(ctx) + if err != nil { + return nil, err + } + if err := transport.ValidateUniverseDomain(c.getClientUniverseDomain(), credentialsUniverseDomain); err != nil { + return nil, err + } + } if c.secure { ri, _ := grpccreds.RequestInfoFromContext(ctx) if err = grpccreds.CheckSecurityLevel(ri.AuthInfo, grpccreds.PrivacyAndIntegrity); err != nil { diff --git a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go index 06acc04151a..969c8d4d200 100644 --- a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go +++ b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go @@ -33,7 +33,7 @@ type ClientCertProvider = func(*tls.CertificateRequestInfo) (*tls.Certificate, e // Options used to configure a [net/http.Client] from [NewClient]. type Options struct { - // DisableTelemetry disables default telemetry (OpenCensus). An example + // DisableTelemetry disables default telemetry (OpenTelemetry). An example // reason to do so would be to bind custom telemetry that overrides the // defaults. DisableTelemetry bool @@ -116,6 +116,13 @@ func (o *Options) resolveDetectOptions() *detect.DetectOptions { if len(do.Scopes) == 0 && do.Audience == "" && io != nil { do.Audience = o.InternalOptions.DefaultAudience } + if o.ClientCertProvider != nil { + tlsConfig := &tls.Config{ + GetClientCertificate: o.ClientCertProvider, + } + do.Client = transport.DefaultHTTPClientWithTLS(tlsConfig) + do.TokenURL = detect.GoogleMTLSTokenURL + } return do } @@ -152,7 +159,14 @@ func AddAuthorizationMiddleware(client *http.Client, creds *auth.Credentials) er } base := client.Transport if base == nil { - base = http.DefaultTransport.(*http.Transport).Clone() + if dt, ok := http.DefaultTransport.(*http.Transport); ok { + base = dt.Clone() + } else { + // Directly reuse the DefaultTransport if the application has + // replaced it with an implementation of RoundTripper other than + // http.Transport. 
+ base = http.DefaultTransport + } } client.Transport = &authTransport{ creds: creds, @@ -188,6 +202,8 @@ func NewClient(opts *Options) (*http.Client, error) { if baseRoundTripper == nil { baseRoundTripper = defaultBaseTransport(clientCertProvider, dialTLSContext) } + // Ensure the token exchange transport uses the same ClientCertProvider as the API transport. + opts.ClientCertProvider = clientCertProvider trans, err := newTransport(baseRoundTripper, opts) if err != nil { return nil, err diff --git a/vendor/cloud.google.com/go/auth/httptransport/transport.go b/vendor/cloud.google.com/go/auth/httptransport/transport.go index 94caeb00f0a..07eea474446 100644 --- a/vendor/cloud.google.com/go/auth/httptransport/transport.go +++ b/vendor/cloud.google.com/go/auth/httptransport/transport.go @@ -193,17 +193,19 @@ func (t *authTransport) RoundTrip(req *http.Request) (*http.Response, error) { } }() } - credentialsUniverseDomain, err := t.creds.UniverseDomain(req.Context()) - if err != nil { - return nil, err - } - if err := transport.ValidateUniverseDomain(t.getClientUniverseDomain(), credentialsUniverseDomain); err != nil { - return nil, err - } token, err := t.creds.Token(req.Context()) if err != nil { return nil, err } + if token.MetadataString("auth.google.tokenSource") != "compute-metadata" { + credentialsUniverseDomain, err := t.creds.UniverseDomain(req.Context()) + if err != nil { + return nil, err + } + if err := transport.ValidateUniverseDomain(t.getClientUniverseDomain(), credentialsUniverseDomain); err != nil { + return nil, err + } + } req2 := req.Clone(req.Context()) SetAuthHeader(token, req2) reqBodyClosed = true diff --git a/vendor/cloud.google.com/go/auth/internal/internal.go b/vendor/cloud.google.com/go/auth/internal/internal.go index 70534e809a4..e56daa78106 100644 --- a/vendor/cloud.google.com/go/auth/internal/internal.go +++ b/vendor/cloud.google.com/go/auth/internal/internal.go @@ -124,6 +124,21 @@ func GetProjectID(b []byte, override string) string { return v.Project } +// DoRequest executes the provided req with the client. It reads the response +// body, closes it, and returns it. +func DoRequest(client *http.Client, req *http.Request) (*http.Response, []byte, error) { + resp, err := client.Do(req) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + body, err := ReadAll(io.LimitReader(resp.Body, maxBodySize)) + if err != nil { + return nil, nil, err + } + return resp, body, nil +} + // ReadAll consumes the whole reader and safely reads the content of its body // with some overflow protection. func ReadAll(r io.Reader) ([]byte, error) { @@ -166,9 +181,9 @@ func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string // httpGetMetadataUniverseDomain is a package var for unit test substitution. 
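internal.DoRequest cannot be imported outside this module, but its shape is easy to mirror; the helper below is a standalone sketch with an illustrative body-size cap.

package main

import (
    "fmt"
    "io"
    "net/http"
)

// doRequest executes the request, reads a bounded amount of the body, closes
// it, and returns both the response and the bytes, mirroring the consolidation
// introduced above. The cap value is illustrative.
func doRequest(client *http.Client, req *http.Request) (*http.Response, []byte, error) {
    resp, err := client.Do(req)
    if err != nil {
        return nil, nil, err
    }
    defer resp.Body.Close()
    const maxBodySize = 1 << 27 // guard against unbounded response bodies
    body, err := io.ReadAll(io.LimitReader(resp.Body, maxBodySize))
    if err != nil {
        return nil, nil, err
    }
    return resp, body, nil
}

func main() {
    req, _ := http.NewRequest("GET", "https://example.com", nil)
    resp, body, err := doRequest(http.DefaultClient, req)
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println(resp.StatusCode, len(body))
}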
var httpGetMetadataUniverseDomain = func(ctx context.Context) (string, error) { - client := metadata.NewClient(&http.Client{Timeout: time.Second}) - // TODO(quartzmo): set ctx on request - return client.Get("universe/universe_domain") + ctx, cancel := context.WithTimeout(ctx, 1*time.Second) + defer cancel() + return metadata.GetWithContext(ctx, "universe/universe_domain") } func getMetadataUniverseDomain(ctx context.Context) (string, error) { diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cba.go b/vendor/cloud.google.com/go/auth/internal/transport/cba.go index 75734906259..d94e0af08a3 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cba.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cba.go @@ -176,7 +176,7 @@ func GetHTTPTransportConfig(opts *Options) (cert.Provider, func(context.Context, } func getTransportConfig(opts *Options) (*transportConfig, error) { - clientCertSource, err := getClientCertificateSource(opts) + clientCertSource, err := GetClientCertificateProvider(opts) if err != nil { return nil, err } @@ -210,14 +210,14 @@ func getTransportConfig(opts *Options) (*transportConfig, error) { }, nil } -// getClientCertificateSource returns a default client certificate source, if +// GetClientCertificateProvider returns a default client certificate source, if // not provided by the user. // // A nil default source can be returned if the source does not exist. Any exceptions // encountered while initializing the default source will be reported as client // error (ex. corrupt metadata file). -func getClientCertificateSource(opts *Options) (cert.Provider, error) { - if !isClientCertificateEnabled() { +func GetClientCertificateProvider(opts *Options) (cert.Provider, error) { + if !isClientCertificateEnabled(opts) { return nil, nil } else if opts.ClientCertProvider != nil { return opts.ClientCertProvider, nil @@ -226,14 +226,14 @@ func getClientCertificateSource(opts *Options) (cert.Provider, error) { } -// isClientCertificateEnabled returns true by default, unless explicitly set to false via env var. -func isClientCertificateEnabled() bool { +// isClientCertificateEnabled returns true by default for all GDU universe domain, unless explicitly overridden by env var +func isClientCertificateEnabled(opts *Options) bool { if value, ok := os.LookupEnv(googleAPIUseCertSource); ok { // error as false is OK b, _ := strconv.ParseBool(value) return b } - return true + return opts.isUniverseDomainGDU() } type transportConfig struct { diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go index 96582ce7b6a..5cedc50f1e8 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go @@ -50,11 +50,14 @@ var errSourceUnavailable = errors.New("certificate source is unavailable") // returned to indicate that a default certificate source is unavailable. 
func DefaultProvider() (Provider, error) { defaultCert.once.Do(func() { - defaultCert.provider, defaultCert.err = NewEnterpriseCertificateProxyProvider("") + defaultCert.provider, defaultCert.err = NewWorkloadX509CertProvider("") if errors.Is(defaultCert.err, errSourceUnavailable) { - defaultCert.provider, defaultCert.err = NewSecureConnectProvider("") + defaultCert.provider, defaultCert.err = NewEnterpriseCertificateProxyProvider("") if errors.Is(defaultCert.err, errSourceUnavailable) { - defaultCert.provider, defaultCert.err = nil, nil + defaultCert.provider, defaultCert.err = NewSecureConnectProvider("") + if errors.Is(defaultCert.err, errSourceUnavailable) { + defaultCert.provider, defaultCert.err = nil, nil + } } } }) diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go new file mode 100644 index 00000000000..e8675bf824b --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go @@ -0,0 +1,117 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cert + +import ( + "crypto/tls" + "encoding/json" + "errors" + "io" + "os" + + "github.com/googleapis/enterprise-certificate-proxy/client/util" +) + +type certConfigs struct { + Workload *workloadSource `json:"workload"` +} + +type workloadSource struct { + CertPath string `json:"cert_path"` + KeyPath string `json:"key_path"` +} + +type certificateConfig struct { + CertConfigs certConfigs `json:"cert_configs"` +} + +// NewWorkloadX509CertProvider creates a certificate source +// that reads a certificate and private key file from the local file system. +// This is intended to be used for workload identity federation. +// +// The configFilePath points to a config file containing relevant parameters +// such as the certificate and key file paths. +// If configFilePath is empty, the client will attempt to load the config from +// a well-known gcloud location. +func NewWorkloadX509CertProvider(configFilePath string) (Provider, error) { + if configFilePath == "" { + envFilePath := util.GetConfigFilePathFromEnv() + if envFilePath != "" { + configFilePath = envFilePath + } else { + configFilePath = util.GetDefaultConfigFilePath() + } + } + + certFile, keyFile, err := getCertAndKeyFiles(configFilePath) + if err != nil { + return nil, err + } + + source := &workloadSource{ + CertPath: certFile, + KeyPath: keyFile, + } + return source.getClientCertificate, nil +} + +// getClientCertificate attempts to load the certificate and key from the files specified in the +// certificate config. +func (s *workloadSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + cert, err := tls.LoadX509KeyPair(s.CertPath, s.KeyPath) + if err != nil { + return nil, err + } + return &cert, nil +} + +// getCertAndKeyFiles attempts to read the provided config file and return the certificate and private +// key file paths. 
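The workload certificate source reads cert and key paths from a certificate config file; the sketch below mirrors that flow outside the internal package, using the same JSON field names and a hypothetical helper name.

package main

import (
    "crypto/tls"
    "encoding/json"
    "fmt"
    "os"
)

// certificateConfig mirrors the JSON shape parsed above, e.g.:
//
//  {"cert_configs": {"workload": {"cert_path": "cert.pem", "key_path": "key.pem"}}}
type certificateConfig struct {
    CertConfigs struct {
        Workload *struct {
            CertPath string `json:"cert_path"`
            KeyPath  string `json:"key_path"`
        } `json:"workload"`
    } `json:"cert_configs"`
}

// providerFromConfig reads the config file and returns a GetClientCertificate
// callback backed by the referenced key pair.
func providerFromConfig(path string) (func(*tls.CertificateRequestInfo) (*tls.Certificate, error), error) {
    raw, err := os.ReadFile(path)
    if err != nil {
        return nil, err
    }
    var cfg certificateConfig
    if err := json.Unmarshal(raw, &cfg); err != nil {
        return nil, err
    }
    if cfg.CertConfigs.Workload == nil {
        return nil, fmt.Errorf("no workload certificate configured in %s", path)
    }
    w := cfg.CertConfigs.Workload
    return func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
        cert, err := tls.LoadX509KeyPair(w.CertPath, w.KeyPath)
        if err != nil {
            return nil, err
        }
        return &cert, nil
    }, nil
}

func main() {
    provider, err := providerFromConfig("certificate_config.json") // placeholder path
    if err != nil {
        fmt.Println(err)
        return
    }
    _ = &tls.Config{GetClientCertificate: provider} // plug into an mTLS client
    fmt.Println("workload mTLS provider ready")
}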
+func getCertAndKeyFiles(configFilePath string) (string, string, error) { + jsonFile, err := os.Open(configFilePath) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return "", "", errSourceUnavailable + } + return "", "", err + } + + byteValue, err := io.ReadAll(jsonFile) + if err != nil { + return "", "", err + } + + var config certificateConfig + if err := json.Unmarshal(byteValue, &config); err != nil { + return "", "", err + } + + if config.CertConfigs.Workload == nil { + return "", "", errSourceUnavailable + } + + certFile := config.CertConfigs.Workload.CertPath + keyFile := config.CertConfigs.Workload.KeyPath + + if certFile == "" { + return "", "", errors.New("certificate configuration is missing the certificate file location") + } + + if keyFile == "" { + return "", "", errors.New("certificate configuration is missing the key file location") + } + + return certFile, keyFile, nil +} diff --git a/vendor/cloud.google.com/go/auth/internal/transport/transport.go b/vendor/cloud.google.com/go/auth/internal/transport/transport.go index b76386d3c0d..718a6b17145 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/transport.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/transport.go @@ -17,7 +17,11 @@ package transport import ( + "crypto/tls" "fmt" + "net" + "net/http" + "time" "cloud.google.com/go/auth/credentials" ) @@ -49,11 +53,11 @@ func CloneDetectOptions(oldDo *credentials.DetectOptions) *credentials.DetectOpt } // Smartly size this memory and copy below. - if oldDo.CredentialsJSON != nil { + if len(oldDo.CredentialsJSON) > 0 { newDo.CredentialsJSON = make([]byte, len(oldDo.CredentialsJSON)) copy(newDo.CredentialsJSON, oldDo.CredentialsJSON) } - if oldDo.Scopes != nil { + if len(oldDo.Scopes) > 0 { newDo.Scopes = make([]string, len(oldDo.Scopes)) copy(newDo.Scopes, oldDo.Scopes) } @@ -74,3 +78,26 @@ func ValidateUniverseDomain(clientUniverseDomain, credentialsUniverseDomain stri } return nil } + +// DefaultHTTPClientWithTLS constructs an HTTPClient using the provided tlsConfig, to support mTLS. 
+func DefaultHTTPClientWithTLS(tlsConfig *tls.Config) *http.Client { + trans := baseTransport() + trans.TLSClientConfig = tlsConfig + return &http.Client{Transport: trans} +} + +func baseTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + MaxIdleConnsPerHost: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md index ff9747beda0..f75c8e204a1 100644 --- a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md +++ b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md @@ -1,5 +1,12 @@ # Changelog +## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.2...auth/oauth2adapt/v0.2.3) (2024-07-10) + + +### Bug Fixes + +* **auth/oauth2adapt:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b)) + ## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.1...auth/oauth2adapt/v0.2.2) (2024-04-23) diff --git a/vendor/cloud.google.com/go/auth/threelegged.go b/vendor/cloud.google.com/go/auth/threelegged.go index 1b8d83c4b4f..a8ce6cd8a8d 100644 --- a/vendor/cloud.google.com/go/auth/threelegged.go +++ b/vendor/cloud.google.com/go/auth/threelegged.go @@ -62,7 +62,8 @@ type Options3LO struct { // Optional. Client *http.Client // EarlyTokenExpiry is the time before the token expires that it should be - // refreshed. If not set the default value is 10 seconds. Optional. + // refreshed. If not set the default value is 3 minutes and 45 seconds. + // Optional. 
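DefaultHTTPClientWithTLS lives in an internal package; the standalone sketch below reproduces the same transport defaults with the caller's TLS configuration attached, so a token-exchange client can present the same client certificate as the API transport.

package main

import (
    "crypto/tls"
    "net"
    "net/http"
    "time"
)

// newHTTPClientWithTLS builds an http.Client with the usual transport defaults
// and the supplied TLS config, mirroring the helper introduced above.
func newHTTPClientWithTLS(tlsConfig *tls.Config) *http.Client {
    return &http.Client{Transport: &http.Transport{
        Proxy: http.ProxyFromEnvironment,
        DialContext: (&net.Dialer{
            Timeout:   30 * time.Second,
            KeepAlive: 30 * time.Second,
        }).DialContext,
        MaxIdleConns:          100,
        MaxIdleConnsPerHost:   100,
        IdleConnTimeout:       90 * time.Second,
        TLSHandshakeTimeout:   10 * time.Second,
        ExpectContinueTimeout: 1 * time.Second,
        TLSClientConfig:       tlsConfig,
    }}
}

func main() {
    client := newHTTPClientWithTLS(&tls.Config{MinVersion: tls.VersionTLS12})
    _ = client // use for the mTLS token endpoint, e.g. oauth2.mtls.googleapis.com
}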
EarlyTokenExpiry time.Duration // AuthHandlerOpts provides a set of options for doing a @@ -284,7 +285,7 @@ func fetchToken(ctx context.Context, o *Options3LO, v url.Values) (*Token, strin v.Set("client_secret", o.ClientSecret) } } - req, err := http.NewRequest("POST", o.TokenURL, strings.NewReader(v.Encode())) + req, err := http.NewRequestWithContext(ctx, "POST", o.TokenURL, strings.NewReader(v.Encode())) if err != nil { return nil, refreshToken, err } @@ -294,25 +295,19 @@ func fetchToken(ctx context.Context, o *Options3LO, v url.Values) (*Token, strin } // Make request - r, err := o.client().Do(req.WithContext(ctx)) + resp, body, err := internal.DoRequest(o.client(), req) if err != nil { return nil, refreshToken, err } - body, err := internal.ReadAll(r.Body) - r.Body.Close() - if err != nil { - return nil, refreshToken, fmt.Errorf("auth: cannot fetch token: %w", err) - } - - failureStatus := r.StatusCode < 200 || r.StatusCode > 299 + failureStatus := resp.StatusCode < 200 || resp.StatusCode > 299 tokError := &Error{ - Response: r, + Response: resp, Body: body, } var token *Token // errors ignored because of default switch on content - content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) + content, _, _ := mime.ParseMediaType(resp.Header.Get("Content-Type")) switch content { case "application/x-www-form-urlencoded", "text/plain": // some endpoints return a query string diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md index 967e060747f..9594e1e2793 100644 --- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -1,5 +1,24 @@ # Changes +## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.4.0...compute/metadata/v0.5.0) (2024-07-10) + + +### Features + +* **compute/metadata:** Add sys check for windows OnGCE ([#10521](https://github.com/googleapis/google-cloud-go/issues/10521)) ([3b9a830](https://github.com/googleapis/google-cloud-go/commit/3b9a83063960d2a2ac20beb47cc15818a68bd302)) + +## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.3.0...compute/metadata/v0.4.0) (2024-07-01) + + +### Features + +* **compute/metadata:** Add context for all functions/methods ([#10370](https://github.com/googleapis/google-cloud-go/issues/10370)) ([66b8efe](https://github.com/googleapis/google-cloud-go/commit/66b8efe7ad877e052b2987bb4475477e38c67bb3)) + + +### Documentation + +* **compute/metadata:** Update OnGCE description ([#10408](https://github.com/googleapis/google-cloud-go/issues/10408)) ([6a46dca](https://github.com/googleapis/google-cloud-go/commit/6a46dca4eae4f88ec6f88822e01e5bf8aeca787f)) + ## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.3...compute/metadata/v0.3.0) (2024-04-15) diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index f67e3c7eeae..345080b7297 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -28,7 +28,6 @@ import ( "net/http" "net/url" "os" - "runtime" "strings" "sync" "time" @@ -88,16 +87,16 @@ func (suffix NotDefinedError) Error() string { return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) } -func (c *cachedValue) get(cl *Client) (v string, err error) { +func (c *cachedValue) get(ctx context.Context, cl *Client) (v string, err error) { 
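fetchToken now branches on the response media type; the standalone sketch below mirrors that switch for a hypothetical token response body.

package main

import (
    "encoding/json"
    "fmt"
    "mime"
    "net/url"
)

// parseAccessToken mirrors the content-type switch above: some token endpoints
// answer with a query string, others with JSON. Helper name is illustrative.
func parseAccessToken(contentType string, body []byte) (string, error) {
    mt, _, err := mime.ParseMediaType(contentType)
    if err != nil {
        return "", err
    }
    switch mt {
    case "application/x-www-form-urlencoded", "text/plain":
        vals, err := url.ParseQuery(string(body))
        if err != nil {
            return "", err
        }
        return vals.Get("access_token"), nil
    default:
        var resp struct {
            AccessToken string `json:"access_token"`
        }
        if err := json.Unmarshal(body, &resp); err != nil {
            return "", err
        }
        return resp.AccessToken, nil
    }
}

func main() {
    tok, _ := parseAccessToken("application/json; charset=utf-8", []byte(`{"access_token":"abc"}`))
    fmt.Println(tok) // abc
}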
defer c.mu.Unlock() c.mu.Lock() if c.v != "" { return c.v, nil } if c.trim { - v, err = cl.getTrimmed(context.Background(), c.k) + v, err = cl.getTrimmed(ctx, c.k) } else { - v, err = cl.GetWithContext(context.Background(), c.k) + v, err = cl.GetWithContext(ctx, c.k) } if err == nil { c.v = v @@ -110,7 +109,9 @@ var ( onGCE bool ) -// OnGCE reports whether this process is running on Google Compute Engine. +// OnGCE reports whether this process is running on Google Compute Platforms. +// NOTE: True returned from `OnGCE` does not guarantee that the metadata server +// is accessible from this process and have all the metadata defined. func OnGCE() bool { onGCEOnce.Do(initOnGCE) return onGCE @@ -188,21 +189,9 @@ func testOnGCE() bool { return <-resc } -// systemInfoSuggestsGCE reports whether the local system (without -// doing network requests) suggests that we're running on GCE. If this -// returns true, testOnGCE tries a bit harder to reach its metadata -// server. -func systemInfoSuggestsGCE() bool { - if runtime.GOOS != "linux" { - // We don't have any non-Linux clues available, at least yet. - return false - } - slurp, _ := os.ReadFile("/sys/class/dmi/id/product_name") - name := strings.TrimSpace(string(slurp)) - return name == "Google" || name == "Google Compute Engine" -} - // Subscribe calls Client.SubscribeWithContext on the default client. +// +// Deprecated: Please use the context aware variant [SubscribeWithContext]. func Subscribe(suffix string, fn func(v string, ok bool) error) error { return defaultClient.SubscribeWithContext(context.Background(), suffix, func(ctx context.Context, v string, ok bool) error { return fn(v, ok) }) } @@ -225,55 +214,188 @@ func GetWithContext(ctx context.Context, suffix string) (string, error) { } // ProjectID returns the current instance's project ID string. -func ProjectID() (string, error) { return defaultClient.ProjectID() } +// +// Deprecated: Please use the context aware variant [ProjectIDWithContext]. +func ProjectID() (string, error) { + return defaultClient.ProjectIDWithContext(context.Background()) +} + +// ProjectIDWithContext returns the current instance's project ID string. +func ProjectIDWithContext(ctx context.Context) (string, error) { + return defaultClient.ProjectIDWithContext(ctx) +} // NumericProjectID returns the current instance's numeric project ID. -func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() } +// +// Deprecated: Please use the context aware variant [NumericProjectIDWithContext]. +func NumericProjectID() (string, error) { + return defaultClient.NumericProjectIDWithContext(context.Background()) +} + +// NumericProjectIDWithContext returns the current instance's numeric project ID. +func NumericProjectIDWithContext(ctx context.Context) (string, error) { + return defaultClient.NumericProjectIDWithContext(ctx) +} // InternalIP returns the instance's primary internal IP address. -func InternalIP() (string, error) { return defaultClient.InternalIP() } +// +// Deprecated: Please use the context aware variant [InternalIPWithContext]. +func InternalIP() (string, error) { + return defaultClient.InternalIPWithContext(context.Background()) +} + +// InternalIPWithContext returns the instance's primary internal IP address. +func InternalIPWithContext(ctx context.Context) (string, error) { + return defaultClient.InternalIPWithContext(ctx) +} // ExternalIP returns the instance's primary external (public) IP address. 
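Migrating to the context-aware metadata helpers added above looks roughly like this; the timeout value is illustrative.

package main

import (
    "context"
    "fmt"
    "time"

    "cloud.google.com/go/compute/metadata"
)

func main() {
    if !metadata.OnGCE() {
        fmt.Println("not on GCE; metadata server unavailable")
        return
    }

    // Prefer the context-aware variants so callers control cancellation and
    // deadlines instead of the implicit context.Background().
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    project, err := metadata.ProjectIDWithContext(ctx)
    if err != nil {
        fmt.Println("project id:", err)
        return
    }
    fmt.Println("project:", project)
}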
-func ExternalIP() (string, error) { return defaultClient.ExternalIP() } +// +// Deprecated: Please use the context aware variant [ExternalIPWithContext]. +func ExternalIP() (string, error) { + return defaultClient.ExternalIPWithContext(context.Background()) +} -// Email calls Client.Email on the default client. -func Email(serviceAccount string) (string, error) { return defaultClient.Email(serviceAccount) } +// ExternalIPWithContext returns the instance's primary external (public) IP address. +func ExternalIPWithContext(ctx context.Context) (string, error) { + return defaultClient.ExternalIPWithContext(ctx) +} + +// Email calls Client.EmailWithContext on the default client. +// +// Deprecated: Please use the context aware variant [EmailWithContext]. +func Email(serviceAccount string) (string, error) { + return defaultClient.EmailWithContext(context.Background(), serviceAccount) +} + +// EmailWithContext calls Client.EmailWithContext on the default client. +func EmailWithContext(ctx context.Context, serviceAccount string) (string, error) { + return defaultClient.EmailWithContext(ctx, serviceAccount) +} // Hostname returns the instance's hostname. This will be of the form // ".c..internal". -func Hostname() (string, error) { return defaultClient.Hostname() } +// +// Deprecated: Please use the context aware variant [HostnameWithContext]. +func Hostname() (string, error) { + return defaultClient.HostnameWithContext(context.Background()) +} + +// HostnameWithContext returns the instance's hostname. This will be of the form +// ".c..internal". +func HostnameWithContext(ctx context.Context) (string, error) { + return defaultClient.HostnameWithContext(ctx) +} // InstanceTags returns the list of user-defined instance tags, // assigned when initially creating a GCE instance. -func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() } +// +// Deprecated: Please use the context aware variant [InstanceTagsWithContext]. +func InstanceTags() ([]string, error) { + return defaultClient.InstanceTagsWithContext(context.Background()) +} + +// InstanceTagsWithContext returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func InstanceTagsWithContext(ctx context.Context) ([]string, error) { + return defaultClient.InstanceTagsWithContext(ctx) +} // InstanceID returns the current VM's numeric instance ID. -func InstanceID() (string, error) { return defaultClient.InstanceID() } +// +// Deprecated: Please use the context aware variant [InstanceIDWithContext]. +func InstanceID() (string, error) { + return defaultClient.InstanceIDWithContext(context.Background()) +} + +// InstanceIDWithContext returns the current VM's numeric instance ID. +func InstanceIDWithContext(ctx context.Context) (string, error) { + return defaultClient.InstanceIDWithContext(ctx) +} // InstanceName returns the current VM's instance ID string. -func InstanceName() (string, error) { return defaultClient.InstanceName() } +// +// Deprecated: Please use the context aware variant [InstanceNameWithContext]. +func InstanceName() (string, error) { + return defaultClient.InstanceNameWithContext(context.Background()) +} + +// InstanceNameWithContext returns the current VM's instance ID string. +func InstanceNameWithContext(ctx context.Context) (string, error) { + return defaultClient.InstanceNameWithContext(ctx) +} // Zone returns the current VM's zone, such as "us-central1-b". 
-func Zone() (string, error) { return defaultClient.Zone() } +// +// Deprecated: Please use the context aware variant [ZoneWithContext]. +func Zone() (string, error) { + return defaultClient.ZoneWithContext(context.Background()) +} -// InstanceAttributes calls Client.InstanceAttributes on the default client. -func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() } +// ZoneWithContext returns the current VM's zone, such as "us-central1-b". +func ZoneWithContext(ctx context.Context) (string, error) { + return defaultClient.ZoneWithContext(ctx) +} -// ProjectAttributes calls Client.ProjectAttributes on the default client. -func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() } +// InstanceAttributes calls Client.InstanceAttributesWithContext on the default client. +// +// Deprecated: Please use the context aware variant [InstanceAttributesWithContext. +func InstanceAttributes() ([]string, error) { + return defaultClient.InstanceAttributesWithContext(context.Background()) +} + +// InstanceAttributesWithContext calls Client.ProjectAttributesWithContext on the default client. +func InstanceAttributesWithContext(ctx context.Context) ([]string, error) { + return defaultClient.InstanceAttributesWithContext(ctx) +} -// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client. +// ProjectAttributes calls Client.ProjectAttributesWithContext on the default client. +// +// Deprecated: Please use the context aware variant [ProjectAttributesWithContext]. +func ProjectAttributes() ([]string, error) { + return defaultClient.ProjectAttributesWithContext(context.Background()) +} + +// ProjectAttributesWithContext calls Client.ProjectAttributesWithContext on the default client. +func ProjectAttributesWithContext(ctx context.Context) ([]string, error) { + return defaultClient.ProjectAttributesWithContext(ctx) +} + +// InstanceAttributeValue calls Client.InstanceAttributeValueWithContext on the default client. +// +// Deprecated: Please use the context aware variant [InstanceAttributeValueWithContext]. func InstanceAttributeValue(attr string) (string, error) { - return defaultClient.InstanceAttributeValue(attr) + return defaultClient.InstanceAttributeValueWithContext(context.Background(), attr) } -// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client. +// InstanceAttributeValueWithContext calls Client.InstanceAttributeValueWithContext on the default client. +func InstanceAttributeValueWithContext(ctx context.Context, attr string) (string, error) { + return defaultClient.InstanceAttributeValueWithContext(ctx, attr) +} + +// ProjectAttributeValue calls Client.ProjectAttributeValueWithContext on the default client. +// +// Deprecated: Please use the context aware variant [ProjectAttributeValueWithContext]. func ProjectAttributeValue(attr string) (string, error) { - return defaultClient.ProjectAttributeValue(attr) + return defaultClient.ProjectAttributeValueWithContext(context.Background(), attr) } -// Scopes calls Client.Scopes on the default client. -func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) } +// ProjectAttributeValueWithContext calls Client.ProjectAttributeValueWithContext on the default client. +func ProjectAttributeValueWithContext(ctx context.Context, attr string) (string, error) { + return defaultClient.ProjectAttributeValueWithContext(ctx, attr) +} + +// Scopes calls Client.ScopesWithContext on the default client. 
+// +// Deprecated: Please use the context aware variant [ScopesWithContext]. +func Scopes(serviceAccount string) ([]string, error) { + return defaultClient.ScopesWithContext(context.Background(), serviceAccount) +} + +// ScopesWithContext calls Client.ScopesWithContext on the default client. +func ScopesWithContext(ctx context.Context, serviceAccount string) ([]string, error) { + return defaultClient.ScopesWithContext(ctx, serviceAccount) +} func strsContains(ss []string, s string) bool { for _, v := range ss { @@ -296,7 +418,6 @@ func NewClient(c *http.Client) *Client { if c == nil { return defaultClient } - return &Client{hc: c} } @@ -381,6 +502,10 @@ func (c *Client) Get(suffix string) (string, error) { // // If the requested metadata is not defined, the returned error will // be of type NotDefinedError. +// +// NOTE: Without an extra deadline in the context this call can take in the +// worst case, with internal backoff retries, up to 15 seconds (e.g. when server +// is responding slowly). Pass context with additional timeouts when needed. func (c *Client) GetWithContext(ctx context.Context, suffix string) (string, error) { val, _, err := c.getETag(ctx, suffix) return val, err @@ -392,8 +517,8 @@ func (c *Client) getTrimmed(ctx context.Context, suffix string) (s string, err e return } -func (c *Client) lines(suffix string) ([]string, error) { - j, err := c.GetWithContext(context.Background(), suffix) +func (c *Client) lines(ctx context.Context, suffix string) ([]string, error) { + j, err := c.GetWithContext(ctx, suffix) if err != nil { return nil, err } @@ -405,45 +530,104 @@ func (c *Client) lines(suffix string) ([]string, error) { } // ProjectID returns the current instance's project ID string. -func (c *Client) ProjectID() (string, error) { return projID.get(c) } +// +// Deprecated: Please use the context aware variant [Client.ProjectIDWithContext]. +func (c *Client) ProjectID() (string, error) { return c.ProjectIDWithContext(context.Background()) } + +// ProjectIDWithContext returns the current instance's project ID string. +func (c *Client) ProjectIDWithContext(ctx context.Context) (string, error) { return projID.get(ctx, c) } // NumericProjectID returns the current instance's numeric project ID. -func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) } +// +// Deprecated: Please use the context aware variant [Client.NumericProjectIDWithContext]. +func (c *Client) NumericProjectID() (string, error) { + return c.NumericProjectIDWithContext(context.Background()) +} + +// NumericProjectIDWithContext returns the current instance's numeric project ID. +func (c *Client) NumericProjectIDWithContext(ctx context.Context) (string, error) { + return projNum.get(ctx, c) +} // InstanceID returns the current VM's numeric instance ID. -func (c *Client) InstanceID() (string, error) { return instID.get(c) } +// +// Deprecated: Please use the context aware variant [Client.InstanceIDWithContext]. +func (c *Client) InstanceID() (string, error) { + return c.InstanceIDWithContext(context.Background()) +} + +// InstanceIDWithContext returns the current VM's numeric instance ID. +func (c *Client) InstanceIDWithContext(ctx context.Context) (string, error) { + return instID.get(ctx, c) +} // InternalIP returns the instance's primary internal IP address. +// +// Deprecated: Please use the context aware variant [Client.InternalIPWithContext]. 
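Given the note that GetWithContext can take up to roughly 15 seconds with internal retries when the server is slow, Client calls are best bounded by a deadline; a sketch using methods added in this patch.

package main

import (
    "context"
    "fmt"
    "net/http"
    "time"

    "cloud.google.com/go/compute/metadata"
)

func main() {
    // Bound the metadata lookup explicitly rather than relying on worst-case
    // retry behaviour inside the client.
    ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
    defer cancel()

    c := metadata.NewClient(&http.Client{})
    zone, err := c.ZoneWithContext(ctx)
    if err != nil {
        fmt.Println("zone:", err)
        return
    }
    fmt.Println("zone:", zone)
}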
func (c *Client) InternalIP() (string, error) { - return c.getTrimmed(context.Background(), "instance/network-interfaces/0/ip") + return c.InternalIPWithContext(context.Background()) +} + +// InternalIPWithContext returns the instance's primary internal IP address. +func (c *Client) InternalIPWithContext(ctx context.Context) (string, error) { + return c.getTrimmed(ctx, "instance/network-interfaces/0/ip") } // Email returns the email address associated with the service account. -// The account may be empty or the string "default" to use the instance's -// main account. +// +// Deprecated: Please use the context aware variant [Client.EmailWithContext]. func (c *Client) Email(serviceAccount string) (string, error) { + return c.EmailWithContext(context.Background(), serviceAccount) +} + +// EmailWithContext returns the email address associated with the service account. +// The serviceAccount parameter default value (empty string or "default" value) +// will use the instance's main account. +func (c *Client) EmailWithContext(ctx context.Context, serviceAccount string) (string, error) { if serviceAccount == "" { serviceAccount = "default" } - return c.getTrimmed(context.Background(), "instance/service-accounts/"+serviceAccount+"/email") + return c.getTrimmed(ctx, "instance/service-accounts/"+serviceAccount+"/email") } // ExternalIP returns the instance's primary external (public) IP address. +// +// Deprecated: Please use the context aware variant [Client.ExternalIPWithContext]. func (c *Client) ExternalIP() (string, error) { - return c.getTrimmed(context.Background(), "instance/network-interfaces/0/access-configs/0/external-ip") + return c.ExternalIPWithContext(context.Background()) +} + +// ExternalIPWithContext returns the instance's primary external (public) IP address. +func (c *Client) ExternalIPWithContext(ctx context.Context) (string, error) { + return c.getTrimmed(ctx, "instance/network-interfaces/0/access-configs/0/external-ip") } // Hostname returns the instance's hostname. This will be of the form // ".c..internal". +// +// Deprecated: Please use the context aware variant [Client.HostnameWithContext]. func (c *Client) Hostname() (string, error) { - return c.getTrimmed(context.Background(), "instance/hostname") + return c.HostnameWithContext(context.Background()) } -// InstanceTags returns the list of user-defined instance tags, -// assigned when initially creating a GCE instance. +// HostnameWithContext returns the instance's hostname. This will be of the form +// ".c..internal". +func (c *Client) HostnameWithContext(ctx context.Context) (string, error) { + return c.getTrimmed(ctx, "instance/hostname") +} + +// InstanceTags returns the list of user-defined instance tags. +// +// Deprecated: Please use the context aware variant [Client.InstanceTagsWithContext]. func (c *Client) InstanceTags() ([]string, error) { + return c.InstanceTagsWithContext(context.Background()) +} + +// InstanceTagsWithContext returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func (c *Client) InstanceTagsWithContext(ctx context.Context) ([]string, error) { var s []string - j, err := c.GetWithContext(context.Background(), "instance/tags") + j, err := c.GetWithContext(ctx, "instance/tags") if err != nil { return nil, err } @@ -454,13 +638,27 @@ func (c *Client) InstanceTags() ([]string, error) { } // InstanceName returns the current VM's instance ID string. +// +// Deprecated: Please use the context aware variant [Client.InstanceNameWithContext]. 
func (c *Client) InstanceName() (string, error) { - return c.getTrimmed(context.Background(), "instance/name") + return c.InstanceNameWithContext(context.Background()) +} + +// InstanceNameWithContext returns the current VM's instance ID string. +func (c *Client) InstanceNameWithContext(ctx context.Context) (string, error) { + return c.getTrimmed(ctx, "instance/name") } // Zone returns the current VM's zone, such as "us-central1-b". +// +// Deprecated: Please use the context aware variant [Client.ZoneWithContext]. func (c *Client) Zone() (string, error) { - zone, err := c.getTrimmed(context.Background(), "instance/zone") + return c.ZoneWithContext(context.Background()) +} + +// ZoneWithContext returns the current VM's zone, such as "us-central1-b". +func (c *Client) ZoneWithContext(ctx context.Context) (string, error) { + zone, err := c.getTrimmed(ctx, "instance/zone") // zone is of the form "projects//zones/". if err != nil { return "", err @@ -471,12 +669,34 @@ func (c *Client) Zone() (string, error) { // InstanceAttributes returns the list of user-defined attributes, // assigned when initially creating a GCE VM instance. The value of an // attribute can be obtained with InstanceAttributeValue. -func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") } +// +// Deprecated: Please use the context aware variant [Client.InstanceAttributesWithContext]. +func (c *Client) InstanceAttributes() ([]string, error) { + return c.InstanceAttributesWithContext(context.Background()) +} + +// InstanceAttributesWithContext returns the list of user-defined attributes, +// assigned when initially creating a GCE VM instance. The value of an +// attribute can be obtained with InstanceAttributeValue. +func (c *Client) InstanceAttributesWithContext(ctx context.Context) ([]string, error) { + return c.lines(ctx, "instance/attributes/") +} // ProjectAttributes returns the list of user-defined attributes // applying to the project as a whole, not just this VM. The value of // an attribute can be obtained with ProjectAttributeValue. -func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") } +// +// Deprecated: Please use the context aware variant [Client.ProjectAttributesWithContext]. +func (c *Client) ProjectAttributes() ([]string, error) { + return c.ProjectAttributesWithContext(context.Background()) +} + +// ProjectAttributesWithContext returns the list of user-defined attributes +// applying to the project as a whole, not just this VM. The value of +// an attribute can be obtained with ProjectAttributeValue. +func (c *Client) ProjectAttributesWithContext(ctx context.Context) ([]string, error) { + return c.lines(ctx, "project/attributes/") +} // InstanceAttributeValue returns the value of the provided VM // instance attribute. @@ -486,8 +706,22 @@ func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project // // InstanceAttributeValue may return ("", nil) if the attribute was // defined to be the empty string. +// +// Deprecated: Please use the context aware variant [Client.InstanceAttributeValueWithContext]. func (c *Client) InstanceAttributeValue(attr string) (string, error) { - return c.GetWithContext(context.Background(), "instance/attributes/"+attr) + return c.InstanceAttributeValueWithContext(context.Background(), attr) +} + +// InstanceAttributeValueWithContext returns the value of the provided VM +// instance attribute. 
+// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// InstanceAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func (c *Client) InstanceAttributeValueWithContext(ctx context.Context, attr string) (string, error) { + return c.GetWithContext(ctx, "instance/attributes/"+attr) } // ProjectAttributeValue returns the value of the provided @@ -498,18 +732,41 @@ func (c *Client) InstanceAttributeValue(attr string) (string, error) { // // ProjectAttributeValue may return ("", nil) if the attribute was // defined to be the empty string. +// +// Deprecated: Please use the context aware variant [Client.ProjectAttributeValueWithContext]. func (c *Client) ProjectAttributeValue(attr string) (string, error) { - return c.GetWithContext(context.Background(), "project/attributes/"+attr) + return c.ProjectAttributeValueWithContext(context.Background(), attr) +} + +// ProjectAttributeValueWithContext returns the value of the provided +// project attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// ProjectAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func (c *Client) ProjectAttributeValueWithContext(ctx context.Context, attr string) (string, error) { + return c.GetWithContext(ctx, "project/attributes/"+attr) } // Scopes returns the service account scopes for the given account. // The account may be empty or the string "default" to use the instance's // main account. +// +// Deprecated: Please use the context aware variant [Client.ScopesWithContext]. func (c *Client) Scopes(serviceAccount string) ([]string, error) { + return c.ScopesWithContext(context.Background(), serviceAccount) +} + +// ScopesWithContext returns the service account scopes for the given account. +// The account may be empty or the string "default" to use the instance's +// main account. +func (c *Client) ScopesWithContext(ctx context.Context, serviceAccount string) ([]string, error) { if serviceAccount == "" { serviceAccount = "default" } - return c.lines("instance/service-accounts/" + serviceAccount + "/scopes") + return c.lines(ctx, "instance/service-accounts/"+serviceAccount+"/scopes") } // Subscribe subscribes to a value from the metadata service. diff --git a/vendor/cloud.google.com/go/compute/metadata/syscheck.go b/vendor/cloud.google.com/go/compute/metadata/syscheck.go new file mode 100644 index 00000000000..e0704fa6477 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/syscheck.go @@ -0,0 +1,26 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !windows && !linux + +package metadata + +// systemInfoSuggestsGCE reports whether the local system (without +// doing network requests) suggests that we're running on GCE. If this +// returns true, testOnGCE tries a bit harder to reach its metadata +// server. 
+func systemInfoSuggestsGCE() bool { + // We don't currently have checks for other GOOS + return false +} diff --git a/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go b/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go new file mode 100644 index 00000000000..74689acbbbf --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go @@ -0,0 +1,28 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build linux + +package metadata + +import ( + "os" + "strings" +) + +func systemInfoSuggestsGCE() bool { + b, _ := os.ReadFile("/sys/class/dmi/id/product_name") + name := strings.TrimSpace(string(b)) + return name == "Google" || name == "Google Compute Engine" +} diff --git a/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go b/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go new file mode 100644 index 00000000000..c0ce627872f --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go @@ -0,0 +1,38 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
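// syscheck_linux.go above uses the DMI product name as a cheap local hint; per
// syscheck.go's comment, a hit only makes testOnGCE probe the metadata server
// more persistently. Callers normally rely on the long-standing exported
// metadata.OnGCE gate before doing lookups. A minimal sketch, with
// ZoneWithContext taken from the metadata diff earlier in this patch:
package main

import (
	"context"
	"fmt"
	"time"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	if !metadata.OnGCE() {
		fmt.Println("not running on GCE; skipping metadata lookups")
		return
	}
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	zone, err := metadata.NewClient(nil).ZoneWithContext(ctx)
	if err != nil {
		fmt.Println("zone lookup failed:", err)
		return
	}
	fmt.Println("zone:", zone)
}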
+ +//go:build windows + +package metadata + +import ( + "strings" + + "golang.org/x/sys/windows/registry" +) + +func systemInfoSuggestsGCE() bool { + k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SYSTEM\HardwareConfig\Current`, registry.QUERY_VALUE) + if err != nil { + return false + } + defer k.Close() + + s, _, err := k.GetStringValue("SystemProductName") + if err != nil { + return false + } + s = strings.TrimSpace(s) + return strings.HasPrefix(s, "Google") +} diff --git a/vendor/cloud.google.com/go/iam/CHANGES.md b/vendor/cloud.google.com/go/iam/CHANGES.md index 11cf3ce4f64..bae4733aebb 100644 --- a/vendor/cloud.google.com/go/iam/CHANGES.md +++ b/vendor/cloud.google.com/go/iam/CHANGES.md @@ -1,6 +1,41 @@ # Changes +## [1.1.12](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.11...iam/v1.1.12) (2024-07-24) + + +### Bug Fixes + +* **iam:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758)) + +## [1.1.11](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.10...iam/v1.1.11) (2024-07-10) + + +### Bug Fixes + +* **iam:** Bump google.golang.org/grpc@v1.64.1 ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5)) + +## [1.1.10](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.9...iam/v1.1.10) (2024-07-01) + + +### Bug Fixes + +* **iam:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b)) + +## [1.1.9](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.8...iam/v1.1.9) (2024-06-26) + + +### Bug Fixes + +* **iam:** Enable new auth lib ([b95805f](https://github.com/googleapis/google-cloud-go/commit/b95805f4c87d3e8d10ea23bd7a2d68d7a4157568)) + +## [1.1.8](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.7...iam/v1.1.8) (2024-05-01) + + +### Bug Fixes + +* **iam:** Bump x/net to v0.24.0 ([ba31ed5](https://github.com/googleapis/google-cloud-go/commit/ba31ed5fda2c9664f2e1cf972469295e63deb5b4)) + ## [1.1.7](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.6...iam/v1.1.7) (2024-03-14) diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go index 6916cfa45a8..619b4c4fa3f 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/iam/v1/iam_policy.proto package iampb @@ -388,7 +388,7 @@ func file_google_iam_v1_iam_policy_proto_rawDescGZIP() []byte { } var file_google_iam_v1_iam_policy_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_google_iam_v1_iam_policy_proto_goTypes = []interface{}{ +var file_google_iam_v1_iam_policy_proto_goTypes = []any{ (*SetIamPolicyRequest)(nil), // 0: google.iam.v1.SetIamPolicyRequest (*GetIamPolicyRequest)(nil), // 1: google.iam.v1.GetIamPolicyRequest (*TestIamPermissionsRequest)(nil), // 2: google.iam.v1.TestIamPermissionsRequest @@ -422,7 +422,7 @@ func file_google_iam_v1_iam_policy_proto_init() { file_google_iam_v1_options_proto_init() file_google_iam_v1_policy_proto_init() if !protoimpl.UnsafeEnabled { - file_google_iam_v1_iam_policy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_iam_policy_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*SetIamPolicyRequest); i { case 0: return &v.state @@ -434,7 +434,7 @@ func file_google_iam_v1_iam_policy_proto_init() { return nil } } - file_google_iam_v1_iam_policy_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_iam_policy_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*GetIamPolicyRequest); i { case 0: return &v.state @@ -446,7 +446,7 @@ func file_google_iam_v1_iam_policy_proto_init() { return nil } } - file_google_iam_v1_iam_policy_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_iam_policy_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsRequest); i { case 0: return &v.state @@ -458,7 +458,7 @@ func file_google_iam_v1_iam_policy_proto_init() { return nil } } - file_google_iam_v1_iam_policy_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_iam_policy_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsResponse); i { case 0: return &v.state diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go index 30865602b48..f1c1c084e34 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
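// The only change in the regenerated exporters above is interface{} -> any.
// Since Go 1.18, `any` is a predeclared alias for `interface{}`, so the
// regenerated code is behavior-identical. A tiny self-contained sketch:
package main

import "fmt"

// typeName accepts any, which is interchangeable with interface{}.
func typeName(v any) string { return fmt.Sprintf("%T", v) }

func main() {
	var old interface{} = 42
	fmt.Println(typeName(old)) // prints "int"; values flow freely between the two spellings
}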
// versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/iam/v1/options.proto package iampb @@ -136,7 +136,7 @@ func file_google_iam_v1_options_proto_rawDescGZIP() []byte { } var file_google_iam_v1_options_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_iam_v1_options_proto_goTypes = []interface{}{ +var file_google_iam_v1_options_proto_goTypes = []any{ (*GetPolicyOptions)(nil), // 0: google.iam.v1.GetPolicyOptions } var file_google_iam_v1_options_proto_depIdxs = []int32{ @@ -153,7 +153,7 @@ func file_google_iam_v1_options_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_iam_v1_options_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_options_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*GetPolicyOptions); i { case 0: return &v.state diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go index 84afa92b519..4dda5d6d056 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/iam/v1/policy.proto package iampb @@ -1036,7 +1036,7 @@ func file_google_iam_v1_policy_proto_rawDescGZIP() []byte { var file_google_iam_v1_policy_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_google_iam_v1_policy_proto_msgTypes = make([]protoimpl.MessageInfo, 7) -var file_google_iam_v1_policy_proto_goTypes = []interface{}{ +var file_google_iam_v1_policy_proto_goTypes = []any{ (AuditLogConfig_LogType)(0), // 0: google.iam.v1.AuditLogConfig.LogType (BindingDelta_Action)(0), // 1: google.iam.v1.BindingDelta.Action (AuditConfigDelta_Action)(0), // 2: google.iam.v1.AuditConfigDelta.Action @@ -1073,7 +1073,7 @@ func file_google_iam_v1_policy_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_iam_v1_policy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_policy_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Policy); i { case 0: return &v.state @@ -1085,7 +1085,7 @@ func file_google_iam_v1_policy_proto_init() { return nil } } - file_google_iam_v1_policy_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_policy_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*Binding); i { case 0: return &v.state @@ -1097,7 +1097,7 @@ func file_google_iam_v1_policy_proto_init() { return nil } } - file_google_iam_v1_policy_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_policy_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*AuditConfig); i { case 0: return &v.state @@ -1109,7 +1109,7 @@ func file_google_iam_v1_policy_proto_init() { return nil } } - file_google_iam_v1_policy_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_policy_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*AuditLogConfig); i { case 0: return &v.state @@ -1121,7 +1121,7 @@ func 
file_google_iam_v1_policy_proto_init() { return nil } } - file_google_iam_v1_policy_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_policy_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*PolicyDelta); i { case 0: return &v.state @@ -1133,7 +1133,7 @@ func file_google_iam_v1_policy_proto_init() { return nil } } - file_google_iam_v1_policy_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_policy_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*BindingDelta); i { case 0: return &v.state @@ -1145,7 +1145,7 @@ func file_google_iam_v1_policy_proto_init() { return nil } } - file_google_iam_v1_policy_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_policy_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*AuditConfigDelta); i { case 0: return &v.state diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json index beb47b5164e..a3e99df29b2 100644 --- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json +++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json @@ -776,7 +776,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/commerce/latest/consumer/procurement/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/compute/apiv1": { @@ -826,7 +826,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/config/latest/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/contactcenterinsights/apiv1": { @@ -1029,6 +1029,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/developerconnect/apiv1": { + "api_shortname": "developerconnect", + "distribution_name": "cloud.google.com/go/developerconnect/apiv1", + "description": "Developer Connect API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/developerconnect/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/dialogflow/apiv2": { "api_shortname": "dialogflow", "distribution_name": "cloud.google.com/go/dialogflow/apiv2", @@ -1519,6 +1529,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/managedkafka/apiv1": { + "api_shortname": "managedkafka", + "distribution_name": "cloud.google.com/go/managedkafka/apiv1", + "description": "Apache Kafka for BigQuery API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/managedkafka/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/maps/addressvalidation/apiv1": { "api_shortname": "addressvalidation", "distribution_name": "cloud.google.com/go/maps/addressvalidation/apiv1", @@ -1549,16 +1569,6 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, - "cloud.google.com/go/maps/mapsplatformdatasets/apiv1alpha": { - "api_shortname": "mapsplatformdatasets", - "distribution_name": "cloud.google.com/go/maps/mapsplatformdatasets/apiv1alpha", - "description": 
"Maps Platform Datasets API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/mapsplatformdatasets/apiv1alpha", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, "cloud.google.com/go/maps/places/apiv1": { "api_shortname": "places", "distribution_name": "cloud.google.com/go/maps/places/apiv1", @@ -1566,7 +1576,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/places/apiv1", - "release_level": "stable", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/maps/routeoptimization/apiv1": { @@ -1596,7 +1606,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/solar/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/mediatranslation/apiv1beta1": { @@ -1666,7 +1676,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/migrationcenter/latest/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/monitoring/apiv3/v2": { @@ -1706,7 +1716,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/netapp/latest/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/networkconnectivity/apiv1": { @@ -1749,6 +1759,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/networkservices/apiv1": { + "api_shortname": "networkservices", + "distribution_name": "cloud.google.com/go/networkservices/apiv1", + "description": "Network Services API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/networkservices/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/notebooks/apiv1": { "api_shortname": "notebooks", "distribution_name": "cloud.google.com/go/notebooks/apiv1", @@ -2076,7 +2096,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/redis/latest/cluster/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/resourcemanager/apiv2": { @@ -2112,7 +2132,7 @@ "cloud.google.com/go/retail/apiv2": { "api_shortname": "retail", "distribution_name": "cloud.google.com/go/retail/apiv2", - "description": "Retail API", + "description": "Vertex AI Search for Retail API", "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2", @@ -2122,7 +2142,7 @@ "cloud.google.com/go/retail/apiv2alpha": { "api_shortname": "retail", "distribution_name": "cloud.google.com/go/retail/apiv2alpha", - "description": "Retail API", + "description": "Vertex AI Search for Retail API", "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2alpha", @@ -2132,7 +2152,7 @@ 
"cloud.google.com/go/retail/apiv2beta": { "api_shortname": "retail", "distribution_name": "cloud.google.com/go/retail/apiv2beta", - "description": "Retail API", + "description": "Vertex AI Search for Retail API", "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2beta", @@ -2219,6 +2239,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/security/publicca/apiv1": { + "api_shortname": "publicca", + "distribution_name": "cloud.google.com/go/security/publicca/apiv1", + "description": "Public Certificate Authority API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/security/latest/publicca/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/security/publicca/apiv1beta1": { "api_shortname": "publicca", "distribution_name": "cloud.google.com/go/security/publicca/apiv1beta1", @@ -2336,7 +2366,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicehealth/latest/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/servicemanagement/apiv1": { @@ -2379,6 +2409,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/shopping/merchant/accounts/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/accounts/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/accounts/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/shopping/merchant/conversions/apiv1beta": { "api_shortname": "merchantapi", "distribution_name": "cloud.google.com/go/shopping/merchant/conversions/apiv1beta", @@ -2389,6 +2429,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/shopping/merchant/datasources/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/datasources/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/datasources/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/shopping/merchant/inventories/apiv1beta": { "api_shortname": "merchantapi", "distribution_name": "cloud.google.com/go/shopping/merchant/inventories/apiv1beta", @@ -2419,6 +2469,26 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/shopping/merchant/products/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/products/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/products/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/shopping/merchant/promotions/apiv1beta": { + "api_shortname": "merchantapi", + 
"distribution_name": "cloud.google.com/go/shopping/merchant/promotions/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/promotions/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/shopping/merchant/quota/apiv1beta": { "api_shortname": "merchantapi", "distribution_name": "cloud.google.com/go/shopping/merchant/quota/apiv1beta", @@ -2866,7 +2936,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workstations/latest/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/workstations/apiv1beta": { diff --git a/vendor/cloud.google.com/go/internal/gen_info.sh b/vendor/cloud.google.com/go/internal/gen_info.sh new file mode 100644 index 00000000000..59c19065380 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/gen_info.sh @@ -0,0 +1,46 @@ +#!/bin/sh + +# Script to generate info.go files with methods for all clients. + +if [[ $# != 2 ]]; then + echo >&2 "usage: $0 DIR PACKAGE" + exit 1 +fi + +outfile=info.go + +cd $1 + +cat <<'EOF' > $outfile +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Also passes any +// provided key-value pairs. Intended for use by Google-written clients. +// +// Internal use only. + +EOF + +echo -e >> $outfile "package $2\n" + + +awk '/^func \(c \*[A-Z].*\) setGoogleClientInfo/ { + printf("func (c %s SetGoogleClientInfo(keyval ...string) {\n", $3); + printf(" c.setGoogleClientInfo(keyval...)\n"); + printf("}\n\n"); +}' *_client.go >> $outfile + +gofmt -w $outfile diff --git a/vendor/cloud.google.com/go/internal/trace/trace.go b/vendor/cloud.google.com/go/internal/trace/trace.go index 97738b2cbee..e8daf800a6a 100644 --- a/vendor/cloud.google.com/go/internal/trace/trace.go +++ b/vendor/cloud.google.com/go/internal/trace/trace.go @@ -33,17 +33,22 @@ import ( ) const ( + // Deprecated: The default experimental tracing support for OpenCensus is + // now deprecated in the Google Cloud client libraries for Go. // TelemetryPlatformTracingOpenCensus is the value to which the environment // variable GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING should be // set to enable OpenCensus tracing. TelemetryPlatformTracingOpenCensus = "opencensus" - // TelemetryPlatformTracingOpenCensus is the value to which the environment + // TelemetryPlatformTracingOpenTelemetry is the value to which the environment // variable GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING should be // set to enable OpenTelemetry tracing. 
TelemetryPlatformTracingOpenTelemetry = "opentelemetry" - // TelemetryPlatformTracingOpenCensus is the name of the environment - // variable that can be set to change the default tracing from OpenCensus - // to OpenTelemetry. + // TelemetryPlatformTracingVar is the name of the environment + // variable that can be set to change the default tracing from OpenTelemetry + // to OpenCensus. + // + // The default experimental tracing support for OpenCensus is now deprecated + // in the Google Cloud client libraries for Go. TelemetryPlatformTracingVar = "GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING" // OpenTelemetryTracerName is the name given to the OpenTelemetry Tracer // when it is obtained from the OpenTelemetry TracerProvider. @@ -51,47 +56,58 @@ const ( ) var ( - // openTelemetryTracingEnabledMu guards access to openTelemetryTracingEnabled field - openTelemetryTracingEnabledMu = sync.RWMutex{} - // openTelemetryTracingEnabled is true if the environment variable + // openCensusTracingEnabledMu guards access to openCensusTracingEnabled field + openCensusTracingEnabledMu = sync.RWMutex{} + // openCensusTracingEnabled is true if the environment variable // GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is set to the - // case-insensitive value "opentelemetry". - openTelemetryTracingEnabled bool = strings.EqualFold(strings.TrimSpace( - os.Getenv(TelemetryPlatformTracingVar)), TelemetryPlatformTracingOpenTelemetry) + // case-insensitive value "opencensus". + openCensusTracingEnabled bool = strings.EqualFold(strings.TrimSpace( + os.Getenv(TelemetryPlatformTracingVar)), TelemetryPlatformTracingOpenCensus) ) -// SetOpenTelemetryTracingEnabledField programmatically sets the value provided by GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING for the purpose of unit testing. -// Do not invoke it directly. Intended for use only in unit tests. Restore original value after each test. +// SetOpenTelemetryTracingEnabledField programmatically sets the value provided +// by GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING for the purpose of +// unit testing. Do not invoke it directly. Intended for use only in unit tests. +// Restore original value after each test. +// +// The default experimental tracing support for OpenCensus is now deprecated in +// the Google Cloud client libraries for Go. func SetOpenTelemetryTracingEnabledField(enabled bool) { - openTelemetryTracingEnabledMu.Lock() - defer openTelemetryTracingEnabledMu.Unlock() - openTelemetryTracingEnabled = enabled + openCensusTracingEnabledMu.Lock() + defer openCensusTracingEnabledMu.Unlock() + openCensusTracingEnabled = !enabled } +// Deprecated: The default experimental tracing support for OpenCensus is now +// deprecated in the Google Cloud client libraries for Go. +// // IsOpenCensusTracingEnabled returns true if the environment variable -// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is NOT set to the -// case-insensitive value "opentelemetry". +// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is set to the +// case-insensitive value "opencensus". func IsOpenCensusTracingEnabled() bool { - return !IsOpenTelemetryTracingEnabled() + openCensusTracingEnabledMu.RLock() + defer openCensusTracingEnabledMu.RUnlock() + return openCensusTracingEnabled } // IsOpenTelemetryTracingEnabled returns true if the environment variable -// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is set to the -// case-insensitive value "opentelemetry". 
+// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is NOT set to the +// case-insensitive value "opencensus". func IsOpenTelemetryTracingEnabled() bool { - openTelemetryTracingEnabledMu.RLock() - defer openTelemetryTracingEnabledMu.RUnlock() - return openTelemetryTracingEnabled + return !IsOpenCensusTracingEnabled() } // StartSpan adds a span to the trace with the given name. If IsOpenCensusTracingEnabled // returns true, the span will be an OpenCensus span. If IsOpenTelemetryTracingEnabled // returns true, the span will be an OpenTelemetry span. Set the environment variable // GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive -// value "opentelemetry" before loading the package to use OpenTelemetry tracing. -// The default will remain OpenCensus until May 29, 2024, at which time the default will -// switch to "opentelemetry" and explicitly setting the environment variable to -// "opencensus" will be required to continue using OpenCensus tracing. +// value "opencensus" before loading the package to use OpenCensus tracing. +// The default was OpenCensus until May 29, 2024, at which time the default was +// changed to "opencensus". Explicitly setting the environment variable to +// "opencensus" is required to continue using OpenCensus tracing. +// +// The default experimental tracing support for OpenCensus is now deprecated in +// the Google Cloud client libraries for Go. func StartSpan(ctx context.Context, name string) context.Context { if IsOpenTelemetryTracingEnabled() { ctx, _ = otel.GetTracerProvider().Tracer(OpenTelemetryTracerName).Start(ctx, name) @@ -105,10 +121,13 @@ func StartSpan(ctx context.Context, name string) context.Context { // returns true, the span will be an OpenCensus span. If IsOpenTelemetryTracingEnabled // returns true, the span will be an OpenTelemetry span. Set the environment variable // GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive -// value "opentelemetry" before loading the package to use OpenTelemetry tracing. -// The default will remain OpenCensus until May 29, 2024, at which time the default will -// switch to "opentelemetry" and explicitly setting the environment variable to -// "opencensus" will be required to continue using OpenCensus tracing. +// value "opencensus" before loading the package to use OpenCensus tracing. +// The default was OpenCensus until May 29, 2024, at which time the default was +// changed to "opencensus". Explicitly setting the environment variable to +// "opencensus" is required to continue using OpenCensus tracing. +// +// The default experimental tracing support for OpenCensus is now deprecated in +// the Google Cloud client libraries for Go. func EndSpan(ctx context.Context, err error) { if IsOpenTelemetryTracingEnabled() { span := ottrace.SpanFromContext(ctx) @@ -191,10 +210,13 @@ func httpStatusCodeToOCCode(httpStatusCode int) int32 { // OpenCensus span. If IsOpenTelemetryTracingEnabled returns true, the expected // span must be an OpenTelemetry span. Set the environment variable // GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive -// value "opentelemetry" before loading the package to use OpenTelemetry tracing. -// The default will remain OpenCensus until May 29, 2024, at which time the default will -// switch to "opentelemetry" and explicitly setting the environment variable to -// "opencensus" will be required to continue using OpenCensus tracing. +// value "opencensus" before loading the package to use OpenCensus tracing. 
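// The selection logic above reduces to a single environment variable read at
// package load time: only the value "opencensus" keeps the deprecated
// OpenCensus path, and anything else (including unset) means OpenTelemetry,
// which has been the default since May 29, 2024. A standalone sketch of the
// same check, outside the internal package:
package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	v := strings.TrimSpace(os.Getenv("GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING"))
	if strings.EqualFold(v, "opencensus") {
		fmt.Println("tracing: OpenCensus (explicit opt-in; deprecated)")
		return
	}
	fmt.Println("tracing: OpenTelemetry (the default)")
}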
+// The default was OpenCensus until May 29, 2024, at which time the default was +// changed to "opencensus". Explicitly setting the environment variable to +// "opencensus" is required to continue using OpenCensus tracing. +// +// The default experimental tracing support for OpenCensus is now deprecated in +// the Google Cloud client libraries for Go. func TracePrintf(ctx context.Context, attrMap map[string]interface{}, format string, args ...interface{}) { if IsOpenTelemetryTracingEnabled() { attrs := otAttrs(attrMap) diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go index be257cb277f..ae1dd6b9a23 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go @@ -54,6 +54,7 @@ func defaultAlertPolicyGRPCClientOptions() []option.ClientOption { internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } @@ -272,7 +273,9 @@ func (c *alertPolicyGRPCClient) Connection() *grpc.ClientConn { func (c *alertPolicyGRPCClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go index e18457af4ab..da216081d5a 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go @@ -56,6 +56,7 @@ func defaultGroupGRPCClientOptions() []option.ClientOption { internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } @@ -293,7 +294,9 @@ func (c *groupGRPCClient) Connection() *grpc.ClientConn { func (c *groupGRPCClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. 
The user should invoke this when diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go index 681da918632..d43d261d185 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go @@ -60,6 +60,7 @@ func defaultMetricGRPCClientOptions() []option.ClientOption { internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } @@ -331,7 +332,9 @@ func (c *metricGRPCClient) Connection() *grpc.ClientConn { func (c *metricGRPCClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go index 13ad3f1ffc3..e7b3595fa64 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/alert.proto package monitoringpb @@ -480,6 +480,9 @@ type AlertPolicy_Documentation struct { // If this field is missing or empty, a default subject line will be // generated. Subject string `protobuf:"bytes,3,opt,name=subject,proto3" json:"subject,omitempty"` + // Optional. Links to content such as playbooks, repositories, and other + // resources. This field can contain up to 3 entries. + Links []*AlertPolicy_Documentation_Link `protobuf:"bytes,4,rep,name=links,proto3" json:"links,omitempty"` } func (x *AlertPolicy_Documentation) Reset() { @@ -535,6 +538,13 @@ func (x *AlertPolicy_Documentation) GetSubject() string { return "" } +func (x *AlertPolicy_Documentation) GetLinks() []*AlertPolicy_Documentation_Link { + if x != nil { + return x.Links + } + return nil +} + // A condition is a true/false test that determines when an alerting policy // should open an incident. If a condition evaluates to true, it signifies // that something is wrong. @@ -786,6 +796,69 @@ func (x *AlertPolicy_AlertStrategy) GetNotificationChannelStrategy() []*AlertPol return nil } +// Links to content such as playbooks, repositories, and other resources. +type AlertPolicy_Documentation_Link struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A short display name for the link. The display name must not be empty + // or exceed 63 characters. Example: "playbook". 
+ DisplayName string `protobuf:"bytes,1,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // The url of a webpage. + // A url can be templatized by using variables + // in the path or the query parameters. The total length of a URL should + // not exceed 2083 characters before and after variable expansion. + // Example: "https://my_domain.com/playbook?name=${resource.name}" + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` +} + +func (x *AlertPolicy_Documentation_Link) Reset() { + *x = AlertPolicy_Documentation_Link{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_Documentation_Link) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_Documentation_Link) ProtoMessage() {} + +func (x *AlertPolicy_Documentation_Link) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_Documentation_Link.ProtoReflect.Descriptor instead. +func (*AlertPolicy_Documentation_Link) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 0, 0} +} + +func (x *AlertPolicy_Documentation_Link) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *AlertPolicy_Documentation_Link) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + // Specifies how many time series must fail a predicate to trigger a // condition. If not specified, then a `{count: 1}` trigger is used. 
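// The new Documentation.Links field and its Link message (display_name plus a
// templatable url, at most 3 entries) can be populated like this when building
// an alert policy. Types come from the generated package in this diff; the
// literal values are placeholders.
package main

import (
	"fmt"

	"cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
)

func main() {
	doc := &monitoringpb.AlertPolicy_Documentation{
		Content:  "Check the playbook before paging a human.",
		MimeType: "text/markdown",
		Links: []*monitoringpb.AlertPolicy_Documentation_Link{
			{
				DisplayName: "playbook",
				Url:         "https://my_domain.com/playbook?name=${resource.name}",
			},
		},
	}
	fmt.Println(len(doc.GetLinks()), "link(s) attached to the policy documentation")
}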
type AlertPolicy_Condition_Trigger struct { @@ -805,7 +878,7 @@ type AlertPolicy_Condition_Trigger struct { func (x *AlertPolicy_Condition_Trigger) Reset() { *x = AlertPolicy_Condition_Trigger{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[5] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -818,7 +891,7 @@ func (x *AlertPolicy_Condition_Trigger) String() string { func (*AlertPolicy_Condition_Trigger) ProtoMessage() {} func (x *AlertPolicy_Condition_Trigger) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[5] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -969,7 +1042,7 @@ type AlertPolicy_Condition_MetricThreshold struct { func (x *AlertPolicy_Condition_MetricThreshold) Reset() { *x = AlertPolicy_Condition_MetricThreshold{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[6] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -982,7 +1055,7 @@ func (x *AlertPolicy_Condition_MetricThreshold) String() string { func (*AlertPolicy_Condition_MetricThreshold) ProtoMessage() {} func (x *AlertPolicy_Condition_MetricThreshold) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[6] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1120,7 +1193,7 @@ type AlertPolicy_Condition_MetricAbsence struct { func (x *AlertPolicy_Condition_MetricAbsence) Reset() { *x = AlertPolicy_Condition_MetricAbsence{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[7] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1133,7 +1206,7 @@ func (x *AlertPolicy_Condition_MetricAbsence) String() string { func (*AlertPolicy_Condition_MetricAbsence) ProtoMessage() {} func (x *AlertPolicy_Condition_MetricAbsence) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[7] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1207,7 +1280,7 @@ type AlertPolicy_Condition_LogMatch struct { func (x *AlertPolicy_Condition_LogMatch) Reset() { *x = AlertPolicy_Condition_LogMatch{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[8] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1220,7 +1293,7 @@ func (x *AlertPolicy_Condition_LogMatch) String() string { func (*AlertPolicy_Condition_LogMatch) ProtoMessage() {} func (x *AlertPolicy_Condition_LogMatch) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[8] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1286,7 +1359,7 @@ type 
AlertPolicy_Condition_MonitoringQueryLanguageCondition struct { func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) Reset() { *x = AlertPolicy_Condition_MonitoringQueryLanguageCondition{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[9] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1299,7 +1372,7 @@ func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) String() string func (*AlertPolicy_Condition_MonitoringQueryLanguageCondition) ProtoMessage() {} func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[9] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1437,7 +1510,7 @@ type AlertPolicy_Condition_PrometheusQueryLanguageCondition struct { func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) Reset() { *x = AlertPolicy_Condition_PrometheusQueryLanguageCondition{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[10] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1450,7 +1523,7 @@ func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) String() string func (*AlertPolicy_Condition_PrometheusQueryLanguageCondition) ProtoMessage() {} func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[10] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1527,7 +1600,7 @@ type AlertPolicy_Condition_MetricThreshold_ForecastOptions struct { func (x *AlertPolicy_Condition_MetricThreshold_ForecastOptions) Reset() { *x = AlertPolicy_Condition_MetricThreshold_ForecastOptions{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[11] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1540,7 +1613,7 @@ func (x *AlertPolicy_Condition_MetricThreshold_ForecastOptions) String() string func (*AlertPolicy_Condition_MetricThreshold_ForecastOptions) ProtoMessage() {} func (x *AlertPolicy_Condition_MetricThreshold_ForecastOptions) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[11] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1577,7 +1650,7 @@ type AlertPolicy_AlertStrategy_NotificationRateLimit struct { func (x *AlertPolicy_AlertStrategy_NotificationRateLimit) Reset() { *x = AlertPolicy_AlertStrategy_NotificationRateLimit{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[14] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1590,7 +1663,7 @@ func (x *AlertPolicy_AlertStrategy_NotificationRateLimit) String() string { func (*AlertPolicy_AlertStrategy_NotificationRateLimit) ProtoMessage() 
{} func (x *AlertPolicy_AlertStrategy_NotificationRateLimit) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[14] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1636,7 +1709,7 @@ type AlertPolicy_AlertStrategy_NotificationChannelStrategy struct { func (x *AlertPolicy_AlertStrategy_NotificationChannelStrategy) Reset() { *x = AlertPolicy_AlertStrategy_NotificationChannelStrategy{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[15] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1649,7 +1722,7 @@ func (x *AlertPolicy_AlertStrategy_NotificationChannelStrategy) String() string func (*AlertPolicy_AlertStrategy_NotificationChannelStrategy) ProtoMessage() {} func (x *AlertPolicy_AlertStrategy_NotificationChannelStrategy) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_alert_proto_msgTypes[15] + mi := &file_google_monitoring_v3_alert_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1699,7 +1772,7 @@ var file_google_monitoring_v3_alert_proto_rawDesc = []byte{ 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf4, 0x29, 0x0a, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x83, 0x2b, 0x0a, 0x0b, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, @@ -1754,301 +1827,310 @@ var file_google_monitoring_v3_alert_proto_rawDesc = []byte{ 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x42, - 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x1a, 0x65, - 0x0a, 0x0d, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6d, - 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6d, 0x69, - 0x6d, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x73, 0x75, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x1a, 0x92, 0x1a, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, - 0x61, 
0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, - 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x6e, 0x0a, 0x13, 0x63, 0x6f, - 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, - 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x68, 0x72, 0x65, 0x73, - 0x68, 0x6f, 0x6c, 0x64, 0x48, 0x00, 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x66, 0x0a, 0x10, 0x63, 0x6f, - 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, - 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, - 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x41, 0x62, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x48, - 0x00, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x62, 0x73, 0x65, - 0x6e, 0x74, 0x12, 0x6a, 0x0a, 0x15, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x6c, 0x6f, 0x67, 0x18, 0x14, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, - 0x6f, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x13, 0x63, 0x6f, 0x6e, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x4c, 0x6f, 0x67, 0x12, 0x9d, - 0x01, 0x0a, 0x23, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x6c, 0x61, - 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, - 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, - 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, - 0x65, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x20, 0x63, 0x6f, - 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x9d, - 0x01, 0x0a, 0x23, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, - 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x6c, 0x61, - 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, - 0x2e, 0x76, 0x33, 0x2e, 0x41, 
0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, - 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, - 0x68, 0x65, 0x75, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, - 0x65, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x20, 0x63, 0x6f, - 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, - 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x1a, 0x45, - 0x0a, 0x07, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x05, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x12, 0x1a, 0x0a, 0x07, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x01, 0x48, 0x00, 0x52, 0x07, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x06, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x1a, 0xc8, 0x06, 0x0a, 0x0f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, - 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, - 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x45, 0x0a, 0x0c, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, - 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0c, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2d, 0x0a, - 0x12, 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x66, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x64, 0x65, 0x6e, 0x6f, 0x6d, - 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x5c, 0x0a, 0x18, - 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x67, 0x67, 0x72, - 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, - 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x17, 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x41, 0x67, - 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x76, 0x0a, 0x10, 0x66, 0x6f, - 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0c, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, - 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, - 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, - 0x64, 0x2e, 0x46, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x0f, 0x66, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x44, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 
0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, - 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x63, 0x6f, - 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x68, 0x72, 0x65, - 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x01, 0x52, 0x0e, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, - 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x07, 0x74, 0x72, 0x69, 0x67, - 0x67, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, - 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x52, 0x07, - 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x12, 0x79, 0x0a, 0x17, 0x65, 0x76, 0x61, 0x6c, 0x75, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, - 0x74, 0x61, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x41, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, - 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x52, 0x15, 0x65, 0x76, 0x61, - 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x44, 0x61, - 0x74, 0x61, 0x1a, 0x5c, 0x0a, 0x0f, 0x46, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x66, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, - 0x74, 0x5f, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, - 0x0f, 0x66, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x48, 0x6f, 0x72, 0x69, 0x7a, 0x6f, 0x6e, - 0x1a, 0xf9, 0x01, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x41, 0x62, 0x73, 0x65, 0x6e, - 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, - 0x45, 0x0a, 0x0c, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, - 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, - 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 
0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, - 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, - 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x72, 0x69, 0x67, - 0x67, 0x65, 0x72, 0x52, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x1a, 0xe1, 0x01, 0x0a, - 0x08, 0x4c, 0x6f, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, - 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x74, 0x0a, 0x10, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, - 0x65, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x49, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x1a, 0xf3, + 0x01, 0x0a, 0x0d, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, + 0x6d, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6d, + 0x69, 0x6d, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x73, + 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x4f, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, + 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3b, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, + 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x75, 0x72, 0x6c, 0x1a, 0x92, 0x1a, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, + 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, + 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x6e, 0x0a, 0x13, 0x63, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, + 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, + 0x69, 
0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, + 0x6f, 0x6c, 0x64, 0x48, 0x00, 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x66, 0x0a, 0x10, 0x63, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x41, 0x62, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, + 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x62, 0x73, 0x65, 0x6e, + 0x74, 0x12, 0x6a, 0x0a, 0x15, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x6c, 0x6f, 0x67, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x6f, - 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x45, 0x78, 0x74, 0x72, - 0x61, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x6c, 0x61, 0x62, - 0x65, 0x6c, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x1a, 0x42, 0x0a, 0x14, - 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x1a, 0xb9, 0x02, 0x0a, 0x20, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x51, - 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x35, 0x0a, 0x08, 0x64, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x13, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x4c, 0x6f, 0x67, 0x12, 0x9d, 0x01, + 0x0a, 0x23, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x6c, 0x61, 0x6e, + 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 
0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, + 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x20, 0x63, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x9d, 0x01, + 0x0a, 0x23, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x6d, + 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x6c, 0x61, 0x6e, + 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, + 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x20, 0x63, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x1a, 0x45, 0x0a, + 0x07, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x12, 0x1a, 0x0a, 0x07, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x01, 0x48, 0x00, 0x52, 0x07, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x06, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x1a, 0xc8, 0x06, 0x0a, 0x0f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, + 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x45, 0x0a, 0x0c, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, + 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2d, 0x0a, 0x12, + 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, + 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x5c, 0x0a, 0x18, 0x64, + 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x67, 0x67, 0x72, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x17, 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x41, 0x67, 0x67, + 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 
0x76, 0x0a, 0x10, 0x66, 0x6f, 0x72, + 0x65, 0x63, 0x61, 0x73, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x4b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x52, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, - 0x72, 0x12, 0x79, 0x0a, 0x17, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x41, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, - 0x67, 0x44, 0x61, 0x74, 0x61, 0x52, 0x15, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x1a, 0xc4, 0x03, 0x0a, - 0x20, 0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, - 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x19, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x3a, 0x0a, 0x08, - 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, + 0x2e, 0x46, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x0f, 0x66, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x44, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, + 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x63, 0x6f, 0x6d, + 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x68, 0x72, 0x65, 0x73, + 0x68, 0x6f, 0x6c, 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x0e, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, + 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x52, 0x07, 
0x74, + 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x12, 0x79, 0x0a, 0x17, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x41, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, + 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x52, 0x15, 0x65, 0x76, 0x61, 0x6c, + 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, + 0x61, 0x1a, 0x5c, 0x0a, 0x0f, 0x46, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x66, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, + 0x5f, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, - 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4f, 0x0a, 0x13, 0x65, 0x76, 0x61, 0x6c, - 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x12, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x75, 0x0a, 0x06, 0x6c, 0x61, 0x62, - 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x58, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, - 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, - 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x43, 0x6f, - 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, - 0x12, 0x22, 0x0a, 0x0a, 0x72, 0x75, 0x6c, 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x72, 0x75, 0x6c, 0x65, 0x47, - 0x72, 0x6f, 0x75, 0x70, 0x12, 0x22, 0x0a, 0x0a, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x72, 0x75, - 0x6c, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x61, - 0x6c, 0x65, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, - 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x22, 0xad, 0x01, 0x0a, 0x15, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x12, 0x27, 0x0a, - 0x23, 0x45, 0x56, 
0x41, 0x4c, 0x55, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x53, - 0x49, 0x4e, 0x47, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, - 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x24, 0x0a, 0x20, 0x45, 0x56, 0x41, 0x4c, 0x55, 0x41, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x5f, 0x44, 0x41, 0x54, - 0x41, 0x5f, 0x49, 0x4e, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x22, 0x0a, 0x1e, - 0x45, 0x56, 0x41, 0x4c, 0x55, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x49, - 0x4e, 0x47, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x02, - 0x12, 0x21, 0x0a, 0x1d, 0x45, 0x56, 0x41, 0x4c, 0x55, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, - 0x49, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x4e, 0x4f, 0x5f, 0x4f, - 0x50, 0x10, 0x03, 0x3a, 0x97, 0x02, 0xea, 0x41, 0x93, 0x02, 0x0a, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x61, - 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, - 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x7d, 0x12, 0x50, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, - 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x7b, - 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x2f, 0x63, 0x6f, - 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x7d, 0x12, 0x44, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, - 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x7d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, - 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x12, 0x01, 0x2a, 0x42, 0x0b, 0x0a, - 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xcc, 0x04, 0x0a, 0x0d, 0x41, - 0x6c, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x7d, 0x0a, 0x17, - 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x74, - 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x2e, 0x4e, - 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x74, 0x65, 0x4c, - 0x69, 0x6d, 0x69, 0x74, 0x52, 0x15, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x61, 0x74, 0x65, 0x4c, 
0x69, 0x6d, 0x69, 0x74, 0x12, 0x38, 0x0a, 0x0a, 0x61, - 0x75, 0x74, 0x6f, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, - 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x12, 0x8f, 0x01, 0x0a, 0x1d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x73, - 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4b, 0x2e, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0f, + 0x66, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x48, 0x6f, 0x72, 0x69, 0x7a, 0x6f, 0x6e, 0x1a, + 0xf9, 0x01, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x41, 0x62, 0x73, 0x65, 0x6e, 0x63, + 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x45, + 0x0a, 0x0c, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, + 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x07, + 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x2e, 0x4e, - 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, - 0x65, 0x6c, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x1b, 0x6e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x53, - 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x1a, 0x4a, 0x0a, 0x15, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, - 0x12, 0x31, 0x0a, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x65, 0x72, - 0x69, 0x6f, 0x64, 0x1a, 0xa3, 0x01, 0x0a, 0x1b, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x53, 0x74, 0x72, 0x61, 0x74, - 0x65, 0x67, 0x79, 0x12, 0x3c, 0x0a, 0x1a, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x18, 0x6e, 0x6f, 0x74, 
0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x4e, 0x61, 0x6d, 0x65, - 0x73, 0x12, 0x46, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x5f, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, + 0x65, 0x72, 0x52, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x1a, 0xe1, 0x01, 0x0a, 0x08, + 0x4c, 0x6f, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x74, 0x0a, 0x10, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x65, + 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x49, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x6f, 0x67, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x45, 0x78, 0x74, 0x72, 0x61, + 0x63, 0x74, 0x6f, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, + 0xb9, 0x02, 0x0a, 0x20, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x51, 0x75, + 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x72, 0x65, 0x6e, 0x6f, 0x74, 0x69, 0x66, - 0x79, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65, - 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x61, 0x0a, 0x15, 0x43, 0x6f, 0x6e, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x72, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x17, 0x0a, 0x13, 0x43, 0x4f, 0x4d, 0x42, 0x49, 0x4e, 0x45, 0x5f, 0x55, 0x4e, 0x53, - 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, - 0x44, 0x10, 0x01, 0x12, 0x06, 0x0a, 0x02, 0x4f, 0x52, 0x10, 0x02, 0x12, 0x1e, 0x0a, 0x1a, 0x41, - 0x4e, 0x44, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x49, 0x4e, 0x47, - 
0x5f, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x03, 0x22, 0x4a, 0x0a, 0x08, 0x53, - 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x45, 0x56, 0x45, 0x52, - 0x49, 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x49, 0x54, 0x49, 0x43, 0x41, 0x4c, 0x10, 0x01, 0x12, - 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x57, 0x41, - 0x52, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x3a, 0xc9, 0x01, 0xea, 0x41, 0xc5, 0x01, 0x0a, 0x25, - 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x12, 0x39, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, - 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x7d, 0x12, 0x2d, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, - 0x65, 0x72, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x4d, 0x0a, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x52, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, + 0x12, 0x79, 0x0a, 0x17, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x41, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x45, + 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, + 0x44, 0x61, 0x74, 0x61, 0x52, 0x15, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x1a, 0xc4, 0x03, 0x0a, 0x20, + 0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, + 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x19, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x3a, 0x0a, 0x08, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4f, 0x0a, 0x13, 0x65, 0x76, 0x61, 0x6c, 0x75, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x12, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x75, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x58, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, + 0x22, 0x0a, 0x0a, 0x72, 0x75, 0x6c, 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x72, 0x75, 0x6c, 0x65, 0x47, 0x72, + 0x6f, 0x75, 0x70, 0x12, 0x22, 0x0a, 0x0a, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x72, 0x75, 0x6c, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x61, 0x6c, + 0x65, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0xad, 0x01, 0x0a, 0x15, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x12, 0x27, 0x0a, 0x23, + 0x45, 0x56, 0x41, 0x4c, 0x55, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x49, + 0x4e, 0x47, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x24, 0x0a, 0x20, 0x45, 0x56, 0x41, 0x4c, 0x55, 0x41, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x5f, 0x44, 0x41, 0x54, 0x41, + 0x5f, 0x49, 0x4e, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x22, 0x0a, 0x1e, 0x45, + 0x56, 0x41, 0x4c, 0x55, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, + 0x47, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x02, 0x12, + 0x21, 0x0a, 0x1d, 0x45, 0x56, 0x41, 0x4c, 0x55, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, + 0x53, 0x53, 0x49, 0x4e, 0x47, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x4e, 0x4f, 0x5f, 0x4f, 0x50, + 0x10, 0x03, 0x3a, 0x97, 0x02, 0xea, 0x41, 0x93, 0x02, 0x0a, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 
0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x61, 0x6c, + 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, + 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x7d, 0x12, 0x50, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, + 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, + 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x2f, 0x63, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x7d, 0x12, 0x44, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, + 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x7d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, + 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x12, 0x01, 0x2a, 0x42, 0x0b, 0x0a, 0x09, + 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xcc, 0x04, 0x0a, 0x0d, 0x41, 0x6c, + 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x7d, 0x0a, 0x17, 0x6e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x74, 0x65, + 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, + 0x41, 0x6c, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x2e, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x52, 0x15, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x38, 0x0a, 0x0a, 0x61, 0x75, + 0x74, 0x6f, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x43, + 0x6c, 0x6f, 0x73, 0x65, 0x12, 0x8f, 0x01, 0x0a, 0x1d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x73, 0x74, + 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4b, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, + 0x41, 0x6c, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x2e, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, + 0x6c, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x1b, 0x6e, 
0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x53, 0x74, + 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x1a, 0x4a, 0x0a, 0x15, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, + 0x31, 0x0a, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x65, 0x72, 0x69, + 0x6f, 0x64, 0x1a, 0xa3, 0x01, 0x0a, 0x1b, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, + 0x67, 0x79, 0x12, 0x3c, 0x0a, 0x1a, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x18, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, + 0x12, 0x46, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x5f, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x72, 0x65, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65, 0x72, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x61, 0x0a, 0x15, 0x43, 0x6f, 0x6e, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x17, 0x0a, 0x13, 0x43, 0x4f, 0x4d, 0x42, 0x49, 0x4e, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x44, + 0x10, 0x01, 0x12, 0x06, 0x0a, 0x02, 0x4f, 0x52, 0x10, 0x02, 0x12, 0x1e, 0x0a, 0x1a, 0x41, 0x4e, + 0x44, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x49, 0x4e, 0x47, 0x5f, + 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x03, 0x22, 0x4a, 0x0a, 0x08, 0x53, 0x65, + 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, + 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x49, 0x54, 0x49, 0x43, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x09, + 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x57, 0x41, 0x52, + 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x3a, 0xc9, 0x01, 0xea, 0x41, 0xc5, 0x01, 0x0a, 0x25, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, + 
0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x12, 0x39, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, - 0x12, 0x01, 0x2a, 0x42, 0xc5, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, - 0x42, 0x0a, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, - 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, - 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, - 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, - 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, - 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, - 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, - 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x12, 0x2d, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, + 0x72, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, + 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x12, + 0x01, 0x2a, 0x42, 0xc5, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, + 0x0a, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, + 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, + 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -2064,8 +2146,8 @@ func file_google_monitoring_v3_alert_proto_rawDescGZIP() []byte { } var file_google_monitoring_v3_alert_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var 
file_google_monitoring_v3_alert_proto_msgTypes = make([]protoimpl.MessageInfo, 16) -var file_google_monitoring_v3_alert_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_alert_proto_msgTypes = make([]protoimpl.MessageInfo, 17) +var file_google_monitoring_v3_alert_proto_goTypes = []any{ (AlertPolicy_ConditionCombinerType)(0), // 0: google.monitoring.v3.AlertPolicy.ConditionCombinerType (AlertPolicy_Severity)(0), // 1: google.monitoring.v3.AlertPolicy.Severity (AlertPolicy_Condition_EvaluationMissingData)(0), // 2: google.monitoring.v3.AlertPolicy.Condition.EvaluationMissingData @@ -2074,68 +2156,70 @@ var file_google_monitoring_v3_alert_proto_goTypes = []interface{}{ (*AlertPolicy_Condition)(nil), // 5: google.monitoring.v3.AlertPolicy.Condition (*AlertPolicy_AlertStrategy)(nil), // 6: google.monitoring.v3.AlertPolicy.AlertStrategy nil, // 7: google.monitoring.v3.AlertPolicy.UserLabelsEntry - (*AlertPolicy_Condition_Trigger)(nil), // 8: google.monitoring.v3.AlertPolicy.Condition.Trigger - (*AlertPolicy_Condition_MetricThreshold)(nil), // 9: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold - (*AlertPolicy_Condition_MetricAbsence)(nil), // 10: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence - (*AlertPolicy_Condition_LogMatch)(nil), // 11: google.monitoring.v3.AlertPolicy.Condition.LogMatch - (*AlertPolicy_Condition_MonitoringQueryLanguageCondition)(nil), // 12: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition - (*AlertPolicy_Condition_PrometheusQueryLanguageCondition)(nil), // 13: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition - (*AlertPolicy_Condition_MetricThreshold_ForecastOptions)(nil), // 14: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.ForecastOptions - nil, // 15: google.monitoring.v3.AlertPolicy.Condition.LogMatch.LabelExtractorsEntry - nil, // 16: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.LabelsEntry - (*AlertPolicy_AlertStrategy_NotificationRateLimit)(nil), // 17: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationRateLimit - (*AlertPolicy_AlertStrategy_NotificationChannelStrategy)(nil), // 18: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationChannelStrategy - (*wrapperspb.BoolValue)(nil), // 19: google.protobuf.BoolValue - (*status.Status)(nil), // 20: google.rpc.Status - (*MutationRecord)(nil), // 21: google.monitoring.v3.MutationRecord - (*durationpb.Duration)(nil), // 22: google.protobuf.Duration - (*Aggregation)(nil), // 23: google.monitoring.v3.Aggregation - (ComparisonType)(0), // 24: google.monitoring.v3.ComparisonType + (*AlertPolicy_Documentation_Link)(nil), // 8: google.monitoring.v3.AlertPolicy.Documentation.Link + (*AlertPolicy_Condition_Trigger)(nil), // 9: google.monitoring.v3.AlertPolicy.Condition.Trigger + (*AlertPolicy_Condition_MetricThreshold)(nil), // 10: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold + (*AlertPolicy_Condition_MetricAbsence)(nil), // 11: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence + (*AlertPolicy_Condition_LogMatch)(nil), // 12: google.monitoring.v3.AlertPolicy.Condition.LogMatch + (*AlertPolicy_Condition_MonitoringQueryLanguageCondition)(nil), // 13: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition + (*AlertPolicy_Condition_PrometheusQueryLanguageCondition)(nil), // 14: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition + (*AlertPolicy_Condition_MetricThreshold_ForecastOptions)(nil), // 15: 
google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.ForecastOptions + nil, // 16: google.monitoring.v3.AlertPolicy.Condition.LogMatch.LabelExtractorsEntry + nil, // 17: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.LabelsEntry + (*AlertPolicy_AlertStrategy_NotificationRateLimit)(nil), // 18: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationRateLimit + (*AlertPolicy_AlertStrategy_NotificationChannelStrategy)(nil), // 19: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationChannelStrategy + (*wrapperspb.BoolValue)(nil), // 20: google.protobuf.BoolValue + (*status.Status)(nil), // 21: google.rpc.Status + (*MutationRecord)(nil), // 22: google.monitoring.v3.MutationRecord + (*durationpb.Duration)(nil), // 23: google.protobuf.Duration + (*Aggregation)(nil), // 24: google.monitoring.v3.Aggregation + (ComparisonType)(0), // 25: google.monitoring.v3.ComparisonType } var file_google_monitoring_v3_alert_proto_depIdxs = []int32{ 4, // 0: google.monitoring.v3.AlertPolicy.documentation:type_name -> google.monitoring.v3.AlertPolicy.Documentation 7, // 1: google.monitoring.v3.AlertPolicy.user_labels:type_name -> google.monitoring.v3.AlertPolicy.UserLabelsEntry 5, // 2: google.monitoring.v3.AlertPolicy.conditions:type_name -> google.monitoring.v3.AlertPolicy.Condition 0, // 3: google.monitoring.v3.AlertPolicy.combiner:type_name -> google.monitoring.v3.AlertPolicy.ConditionCombinerType - 19, // 4: google.monitoring.v3.AlertPolicy.enabled:type_name -> google.protobuf.BoolValue - 20, // 5: google.monitoring.v3.AlertPolicy.validity:type_name -> google.rpc.Status - 21, // 6: google.monitoring.v3.AlertPolicy.creation_record:type_name -> google.monitoring.v3.MutationRecord - 21, // 7: google.monitoring.v3.AlertPolicy.mutation_record:type_name -> google.monitoring.v3.MutationRecord + 20, // 4: google.monitoring.v3.AlertPolicy.enabled:type_name -> google.protobuf.BoolValue + 21, // 5: google.monitoring.v3.AlertPolicy.validity:type_name -> google.rpc.Status + 22, // 6: google.monitoring.v3.AlertPolicy.creation_record:type_name -> google.monitoring.v3.MutationRecord + 22, // 7: google.monitoring.v3.AlertPolicy.mutation_record:type_name -> google.monitoring.v3.MutationRecord 6, // 8: google.monitoring.v3.AlertPolicy.alert_strategy:type_name -> google.monitoring.v3.AlertPolicy.AlertStrategy 1, // 9: google.monitoring.v3.AlertPolicy.severity:type_name -> google.monitoring.v3.AlertPolicy.Severity - 9, // 10: google.monitoring.v3.AlertPolicy.Condition.condition_threshold:type_name -> google.monitoring.v3.AlertPolicy.Condition.MetricThreshold - 10, // 11: google.monitoring.v3.AlertPolicy.Condition.condition_absent:type_name -> google.monitoring.v3.AlertPolicy.Condition.MetricAbsence - 11, // 12: google.monitoring.v3.AlertPolicy.Condition.condition_matched_log:type_name -> google.monitoring.v3.AlertPolicy.Condition.LogMatch - 12, // 13: google.monitoring.v3.AlertPolicy.Condition.condition_monitoring_query_language:type_name -> google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition - 13, // 14: google.monitoring.v3.AlertPolicy.Condition.condition_prometheus_query_language:type_name -> google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition - 17, // 15: google.monitoring.v3.AlertPolicy.AlertStrategy.notification_rate_limit:type_name -> google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationRateLimit - 22, // 16: google.monitoring.v3.AlertPolicy.AlertStrategy.auto_close:type_name -> google.protobuf.Duration - 18, // 17: 
google.monitoring.v3.AlertPolicy.AlertStrategy.notification_channel_strategy:type_name -> google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationChannelStrategy - 23, // 18: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.aggregations:type_name -> google.monitoring.v3.Aggregation - 23, // 19: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.denominator_aggregations:type_name -> google.monitoring.v3.Aggregation - 14, // 20: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.forecast_options:type_name -> google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.ForecastOptions - 24, // 21: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.comparison:type_name -> google.monitoring.v3.ComparisonType - 22, // 22: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.duration:type_name -> google.protobuf.Duration - 8, // 23: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.trigger:type_name -> google.monitoring.v3.AlertPolicy.Condition.Trigger - 2, // 24: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.evaluation_missing_data:type_name -> google.monitoring.v3.AlertPolicy.Condition.EvaluationMissingData - 23, // 25: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence.aggregations:type_name -> google.monitoring.v3.Aggregation - 22, // 26: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence.duration:type_name -> google.protobuf.Duration - 8, // 27: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence.trigger:type_name -> google.monitoring.v3.AlertPolicy.Condition.Trigger - 15, // 28: google.monitoring.v3.AlertPolicy.Condition.LogMatch.label_extractors:type_name -> google.monitoring.v3.AlertPolicy.Condition.LogMatch.LabelExtractorsEntry - 22, // 29: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition.duration:type_name -> google.protobuf.Duration - 8, // 30: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition.trigger:type_name -> google.monitoring.v3.AlertPolicy.Condition.Trigger - 2, // 31: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition.evaluation_missing_data:type_name -> google.monitoring.v3.AlertPolicy.Condition.EvaluationMissingData - 22, // 32: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.duration:type_name -> google.protobuf.Duration - 22, // 33: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.evaluation_interval:type_name -> google.protobuf.Duration - 16, // 34: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.labels:type_name -> google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.LabelsEntry - 22, // 35: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.ForecastOptions.forecast_horizon:type_name -> google.protobuf.Duration - 22, // 36: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationRateLimit.period:type_name -> google.protobuf.Duration - 22, // 37: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationChannelStrategy.renotify_interval:type_name -> google.protobuf.Duration - 38, // [38:38] is the sub-list for method output_type - 38, // [38:38] is the sub-list for method input_type - 38, // [38:38] is the sub-list for extension type_name - 38, // [38:38] is the sub-list for extension extendee - 0, // [0:38] is the sub-list for field type_name + 8, // 10: google.monitoring.v3.AlertPolicy.Documentation.links:type_name -> google.monitoring.v3.AlertPolicy.Documentation.Link + 10, // 11: 
google.monitoring.v3.AlertPolicy.Condition.condition_threshold:type_name -> google.monitoring.v3.AlertPolicy.Condition.MetricThreshold + 11, // 12: google.monitoring.v3.AlertPolicy.Condition.condition_absent:type_name -> google.monitoring.v3.AlertPolicy.Condition.MetricAbsence + 12, // 13: google.monitoring.v3.AlertPolicy.Condition.condition_matched_log:type_name -> google.monitoring.v3.AlertPolicy.Condition.LogMatch + 13, // 14: google.monitoring.v3.AlertPolicy.Condition.condition_monitoring_query_language:type_name -> google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition + 14, // 15: google.monitoring.v3.AlertPolicy.Condition.condition_prometheus_query_language:type_name -> google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition + 18, // 16: google.monitoring.v3.AlertPolicy.AlertStrategy.notification_rate_limit:type_name -> google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationRateLimit + 23, // 17: google.monitoring.v3.AlertPolicy.AlertStrategy.auto_close:type_name -> google.protobuf.Duration + 19, // 18: google.monitoring.v3.AlertPolicy.AlertStrategy.notification_channel_strategy:type_name -> google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationChannelStrategy + 24, // 19: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.aggregations:type_name -> google.monitoring.v3.Aggregation + 24, // 20: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.denominator_aggregations:type_name -> google.monitoring.v3.Aggregation + 15, // 21: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.forecast_options:type_name -> google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.ForecastOptions + 25, // 22: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.comparison:type_name -> google.monitoring.v3.ComparisonType + 23, // 23: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.duration:type_name -> google.protobuf.Duration + 9, // 24: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.trigger:type_name -> google.monitoring.v3.AlertPolicy.Condition.Trigger + 2, // 25: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.evaluation_missing_data:type_name -> google.monitoring.v3.AlertPolicy.Condition.EvaluationMissingData + 24, // 26: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence.aggregations:type_name -> google.monitoring.v3.Aggregation + 23, // 27: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence.duration:type_name -> google.protobuf.Duration + 9, // 28: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence.trigger:type_name -> google.monitoring.v3.AlertPolicy.Condition.Trigger + 16, // 29: google.monitoring.v3.AlertPolicy.Condition.LogMatch.label_extractors:type_name -> google.monitoring.v3.AlertPolicy.Condition.LogMatch.LabelExtractorsEntry + 23, // 30: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition.duration:type_name -> google.protobuf.Duration + 9, // 31: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition.trigger:type_name -> google.monitoring.v3.AlertPolicy.Condition.Trigger + 2, // 32: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition.evaluation_missing_data:type_name -> google.monitoring.v3.AlertPolicy.Condition.EvaluationMissingData + 23, // 33: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.duration:type_name -> google.protobuf.Duration + 23, // 34: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.evaluation_interval:type_name -> 
google.protobuf.Duration + 17, // 35: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.labels:type_name -> google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.LabelsEntry + 23, // 36: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.ForecastOptions.forecast_horizon:type_name -> google.protobuf.Duration + 23, // 37: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationRateLimit.period:type_name -> google.protobuf.Duration + 23, // 38: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationChannelStrategy.renotify_interval:type_name -> google.protobuf.Duration + 39, // [39:39] is the sub-list for method output_type + 39, // [39:39] is the sub-list for method input_type + 39, // [39:39] is the sub-list for extension type_name + 39, // [39:39] is the sub-list for extension extendee + 0, // [0:39] is the sub-list for field type_name } func init() { file_google_monitoring_v3_alert_proto_init() } @@ -2146,7 +2230,7 @@ func file_google_monitoring_v3_alert_proto_init() { file_google_monitoring_v3_common_proto_init() file_google_monitoring_v3_mutation_record_proto_init() if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_alert_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*AlertPolicy); i { case 0: return &v.state @@ -2158,7 +2242,7 @@ func file_google_monitoring_v3_alert_proto_init() { return nil } } - file_google_monitoring_v3_alert_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*AlertPolicy_Documentation); i { case 0: return &v.state @@ -2170,7 +2254,7 @@ func file_google_monitoring_v3_alert_proto_init() { return nil } } - file_google_monitoring_v3_alert_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*AlertPolicy_Condition); i { case 0: return &v.state @@ -2182,7 +2266,7 @@ func file_google_monitoring_v3_alert_proto_init() { return nil } } - file_google_monitoring_v3_alert_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*AlertPolicy_AlertStrategy); i { case 0: return &v.state @@ -2194,7 +2278,19 @@ func file_google_monitoring_v3_alert_proto_init() { return nil } } - file_google_monitoring_v3_alert_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_Documentation_Link); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*AlertPolicy_Condition_Trigger); i { case 0: return &v.state @@ -2206,7 +2302,7 @@ func file_google_monitoring_v3_alert_proto_init() { return nil } } - file_google_monitoring_v3_alert_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*AlertPolicy_Condition_MetricThreshold); i { case 0: return &v.state @@ -2218,7 +2314,7 @@ func file_google_monitoring_v3_alert_proto_init() { 
return nil } } - file_google_monitoring_v3_alert_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*AlertPolicy_Condition_MetricAbsence); i { case 0: return &v.state @@ -2230,7 +2326,7 @@ func file_google_monitoring_v3_alert_proto_init() { return nil } } - file_google_monitoring_v3_alert_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*AlertPolicy_Condition_LogMatch); i { case 0: return &v.state @@ -2242,7 +2338,7 @@ func file_google_monitoring_v3_alert_proto_init() { return nil } } - file_google_monitoring_v3_alert_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*AlertPolicy_Condition_MonitoringQueryLanguageCondition); i { case 0: return &v.state @@ -2254,7 +2350,7 @@ func file_google_monitoring_v3_alert_proto_init() { return nil } } - file_google_monitoring_v3_alert_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*AlertPolicy_Condition_PrometheusQueryLanguageCondition); i { case 0: return &v.state @@ -2266,7 +2362,7 @@ func file_google_monitoring_v3_alert_proto_init() { return nil } } - file_google_monitoring_v3_alert_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*AlertPolicy_Condition_MetricThreshold_ForecastOptions); i { case 0: return &v.state @@ -2278,7 +2374,7 @@ func file_google_monitoring_v3_alert_proto_init() { return nil } } - file_google_monitoring_v3_alert_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v := v.(*AlertPolicy_AlertStrategy_NotificationRateLimit); i { case 0: return &v.state @@ -2290,7 +2386,7 @@ func file_google_monitoring_v3_alert_proto_init() { return nil } } - file_google_monitoring_v3_alert_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_proto_msgTypes[16].Exporter = func(v any, i int) any { switch v := v.(*AlertPolicy_AlertStrategy_NotificationChannelStrategy); i { case 0: return &v.state @@ -2303,14 +2399,14 @@ func file_google_monitoring_v3_alert_proto_init() { } } } - file_google_monitoring_v3_alert_proto_msgTypes[2].OneofWrappers = []interface{}{ + file_google_monitoring_v3_alert_proto_msgTypes[2].OneofWrappers = []any{ (*AlertPolicy_Condition_ConditionThreshold)(nil), (*AlertPolicy_Condition_ConditionAbsent)(nil), (*AlertPolicy_Condition_ConditionMatchedLog)(nil), (*AlertPolicy_Condition_ConditionMonitoringQueryLanguage)(nil), (*AlertPolicy_Condition_ConditionPrometheusQueryLanguage)(nil), } - file_google_monitoring_v3_alert_proto_msgTypes[5].OneofWrappers = []interface{}{ + file_google_monitoring_v3_alert_proto_msgTypes[6].OneofWrappers = []any{ (*AlertPolicy_Condition_Trigger_Count)(nil), (*AlertPolicy_Condition_Trigger_Percent)(nil), } @@ -2320,7 +2416,7 @@ func file_google_monitoring_v3_alert_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_monitoring_v3_alert_proto_rawDesc, NumEnums: 3, - NumMessages: 16, + 
NumMessages: 17, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go index 07891b2bbb2..f0e149d16b6 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/alert_service.proto package monitoringpb @@ -647,7 +647,7 @@ func file_google_monitoring_v3_alert_service_proto_rawDescGZIP() []byte { } var file_google_monitoring_v3_alert_service_proto_msgTypes = make([]protoimpl.MessageInfo, 6) -var file_google_monitoring_v3_alert_service_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_alert_service_proto_goTypes = []any{ (*CreateAlertPolicyRequest)(nil), // 0: google.monitoring.v3.CreateAlertPolicyRequest (*GetAlertPolicyRequest)(nil), // 1: google.monitoring.v3.GetAlertPolicyRequest (*ListAlertPoliciesRequest)(nil), // 2: google.monitoring.v3.ListAlertPoliciesRequest @@ -687,7 +687,7 @@ func file_google_monitoring_v3_alert_service_proto_init() { } file_google_monitoring_v3_alert_proto_init() if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_alert_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_service_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*CreateAlertPolicyRequest); i { case 0: return &v.state @@ -699,7 +699,7 @@ func file_google_monitoring_v3_alert_service_proto_init() { return nil } } - file_google_monitoring_v3_alert_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_service_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*GetAlertPolicyRequest); i { case 0: return &v.state @@ -711,7 +711,7 @@ func file_google_monitoring_v3_alert_service_proto_init() { return nil } } - file_google_monitoring_v3_alert_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_service_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ListAlertPoliciesRequest); i { case 0: return &v.state @@ -723,7 +723,7 @@ func file_google_monitoring_v3_alert_service_proto_init() { return nil } } - file_google_monitoring_v3_alert_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_service_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*ListAlertPoliciesResponse); i { case 0: return &v.state @@ -735,7 +735,7 @@ func file_google_monitoring_v3_alert_service_proto_init() { return nil } } - file_google_monitoring_v3_alert_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_service_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*UpdateAlertPolicyRequest); i { case 0: return &v.state @@ -747,7 +747,7 @@ func file_google_monitoring_v3_alert_service_proto_init() { return nil } } - 
file_google_monitoring_v3_alert_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_alert_service_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*DeleteAlertPolicyRequest); i { case 0: return &v.state diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go index befc5dd2f83..c9aa5a02472 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/common.proto package monitoringpb @@ -1037,20 +1037,20 @@ var file_google_monitoring_v3_common_proto_rawDesc = []byte{ 0x00, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x54, 0x49, 0x45, 0x52, 0x5f, 0x42, 0x41, 0x53, 0x49, 0x43, 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x54, 0x49, 0x45, 0x52, 0x5f, 0x50, 0x52, 0x45, 0x4d, 0x49, 0x55, - 0x4d, 0x10, 0x02, 0x1a, 0x02, 0x18, 0x01, 0x42, 0xc6, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, + 0x4d, 0x10, 0x02, 0x1a, 0x02, 0x18, 0x01, 0x42, 0xcd, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, - 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, - 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, - 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, - 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xa2, 0x02, 0x04, 0x47, 0x4d, 0x4f, 0x4e, 0xaa, 0x02, 0x1a, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( 
@@ -1067,7 +1067,7 @@ func file_google_monitoring_v3_common_proto_rawDescGZIP() []byte { var file_google_monitoring_v3_common_proto_enumTypes = make([]protoimpl.EnumInfo, 4) var file_google_monitoring_v3_common_proto_msgTypes = make([]protoimpl.MessageInfo, 3) -var file_google_monitoring_v3_common_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_common_proto_goTypes = []any{ (ComparisonType)(0), // 0: google.monitoring.v3.ComparisonType (ServiceTier)(0), // 1: google.monitoring.v3.ServiceTier (Aggregation_Aligner)(0), // 2: google.monitoring.v3.Aggregation.Aligner @@ -1099,7 +1099,7 @@ func file_google_monitoring_v3_common_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_common_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*TypedValue); i { case 0: return &v.state @@ -1111,7 +1111,7 @@ func file_google_monitoring_v3_common_proto_init() { return nil } } - file_google_monitoring_v3_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_common_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*TimeInterval); i { case 0: return &v.state @@ -1123,7 +1123,7 @@ func file_google_monitoring_v3_common_proto_init() { return nil } } - file_google_monitoring_v3_common_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_common_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*Aggregation); i { case 0: return &v.state @@ -1136,7 +1136,7 @@ func file_google_monitoring_v3_common_proto_init() { } } } - file_google_monitoring_v3_common_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_google_monitoring_v3_common_proto_msgTypes[0].OneofWrappers = []any{ (*TypedValue_BoolValue)(nil), (*TypedValue_Int64Value)(nil), (*TypedValue_DoubleValue)(nil), diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go index 4c2dd819f8d..7b1dc962da8 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/dropped_labels.proto package monitoringpb @@ -144,7 +144,7 @@ func file_google_monitoring_v3_dropped_labels_proto_rawDescGZIP() []byte { } var file_google_monitoring_v3_dropped_labels_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_google_monitoring_v3_dropped_labels_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_dropped_labels_proto_goTypes = []any{ (*DroppedLabels)(nil), // 0: google.monitoring.v3.DroppedLabels nil, // 1: google.monitoring.v3.DroppedLabels.LabelEntry } @@ -163,7 +163,7 @@ func file_google_monitoring_v3_dropped_labels_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_dropped_labels_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_dropped_labels_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*DroppedLabels); i { case 0: return &v.state diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go index 0e9551b592b..dff27f9d8ce 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/group.proto package monitoringpb @@ -214,7 +214,7 @@ func file_google_monitoring_v3_group_proto_rawDescGZIP() []byte { } var file_google_monitoring_v3_group_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_monitoring_v3_group_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_group_proto_goTypes = []any{ (*Group)(nil), // 0: google.monitoring.v3.Group } var file_google_monitoring_v3_group_proto_depIdxs = []int32{ @@ -231,7 +231,7 @@ func file_google_monitoring_v3_group_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_group_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_group_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Group); i { case 0: return &v.state diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go index ac831c9bf46..46747d90643 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/group_service.proto package monitoringpb @@ -875,7 +875,7 @@ func file_google_monitoring_v3_group_service_proto_rawDescGZIP() []byte { } var file_google_monitoring_v3_group_service_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_google_monitoring_v3_group_service_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_group_service_proto_goTypes = []any{ (*ListGroupsRequest)(nil), // 0: google.monitoring.v3.ListGroupsRequest (*ListGroupsResponse)(nil), // 1: google.monitoring.v3.ListGroupsResponse (*GetGroupRequest)(nil), // 2: google.monitoring.v3.GetGroupRequest @@ -922,7 +922,7 @@ func file_google_monitoring_v3_group_service_proto_init() { file_google_monitoring_v3_common_proto_init() file_google_monitoring_v3_group_proto_init() if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_group_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_group_service_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*ListGroupsRequest); i { case 0: return &v.state @@ -934,7 +934,7 @@ func file_google_monitoring_v3_group_service_proto_init() { return nil } } - file_google_monitoring_v3_group_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_group_service_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ListGroupsResponse); i { case 0: return &v.state @@ -946,7 +946,7 @@ func file_google_monitoring_v3_group_service_proto_init() { return nil } } - file_google_monitoring_v3_group_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_group_service_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*GetGroupRequest); i { case 0: return &v.state @@ -958,7 +958,7 @@ func file_google_monitoring_v3_group_service_proto_init() { return nil } } - file_google_monitoring_v3_group_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_group_service_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*CreateGroupRequest); i { case 0: return &v.state @@ -970,7 +970,7 @@ func file_google_monitoring_v3_group_service_proto_init() { return nil } } - file_google_monitoring_v3_group_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_group_service_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*UpdateGroupRequest); i { case 0: return &v.state @@ -982,7 +982,7 @@ func file_google_monitoring_v3_group_service_proto_init() { return nil } } - file_google_monitoring_v3_group_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_group_service_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*DeleteGroupRequest); i { case 0: return &v.state @@ -994,7 +994,7 @@ func file_google_monitoring_v3_group_service_proto_init() { return nil } } - file_google_monitoring_v3_group_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_group_service_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*ListGroupMembersRequest); i { case 0: return &v.state @@ -1006,7 +1006,7 @@ func file_google_monitoring_v3_group_service_proto_init() { return nil } } - 
file_google_monitoring_v3_group_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_group_service_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*ListGroupMembersResponse); i { case 0: return &v.state @@ -1019,7 +1019,7 @@ func file_google_monitoring_v3_group_service_proto_init() { } } } - file_google_monitoring_v3_group_service_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_google_monitoring_v3_group_service_proto_msgTypes[0].OneofWrappers = []any{ (*ListGroupsRequest_ChildrenOfGroup)(nil), (*ListGroupsRequest_AncestorsOfGroup)(nil), (*ListGroupsRequest_DescendantsOfGroup)(nil), diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go index 17f31170515..b22c22d07e5 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/metric.proto package monitoringpb @@ -992,7 +992,7 @@ func file_google_monitoring_v3_metric_proto_rawDescGZIP() []byte { } var file_google_monitoring_v3_metric_proto_msgTypes = make([]protoimpl.MessageInfo, 10) -var file_google_monitoring_v3_metric_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_metric_proto_goTypes = []any{ (*Point)(nil), // 0: google.monitoring.v3.Point (*TimeSeries)(nil), // 1: google.monitoring.v3.TimeSeries (*TimeSeriesDescriptor)(nil), // 2: google.monitoring.v3.TimeSeriesDescriptor @@ -1047,7 +1047,7 @@ func file_google_monitoring_v3_metric_proto_init() { } file_google_monitoring_v3_common_proto_init() if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_metric_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Point); i { case 0: return &v.state @@ -1059,7 +1059,7 @@ func file_google_monitoring_v3_metric_proto_init() { return nil } } - file_google_monitoring_v3_metric_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*TimeSeries); i { case 0: return &v.state @@ -1071,7 +1071,7 @@ func file_google_monitoring_v3_metric_proto_init() { return nil } } - file_google_monitoring_v3_metric_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*TimeSeriesDescriptor); i { case 0: return &v.state @@ -1083,7 +1083,7 @@ func file_google_monitoring_v3_metric_proto_init() { return nil } } - file_google_monitoring_v3_metric_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*TimeSeriesData); i { case 0: return &v.state @@ -1095,7 +1095,7 @@ func file_google_monitoring_v3_metric_proto_init() { return nil } } - 
file_google_monitoring_v3_metric_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*LabelValue); i { case 0: return &v.state @@ -1107,7 +1107,7 @@ func file_google_monitoring_v3_metric_proto_init() { return nil } } - file_google_monitoring_v3_metric_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*QueryError); i { case 0: return &v.state @@ -1119,7 +1119,7 @@ func file_google_monitoring_v3_metric_proto_init() { return nil } } - file_google_monitoring_v3_metric_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*TextLocator); i { case 0: return &v.state @@ -1131,7 +1131,7 @@ func file_google_monitoring_v3_metric_proto_init() { return nil } } - file_google_monitoring_v3_metric_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*TimeSeriesDescriptor_ValueDescriptor); i { case 0: return &v.state @@ -1143,7 +1143,7 @@ func file_google_monitoring_v3_metric_proto_init() { return nil } } - file_google_monitoring_v3_metric_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*TimeSeriesData_PointData); i { case 0: return &v.state @@ -1155,7 +1155,7 @@ func file_google_monitoring_v3_metric_proto_init() { return nil } } - file_google_monitoring_v3_metric_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*TextLocator_Position); i { case 0: return &v.state @@ -1168,7 +1168,7 @@ func file_google_monitoring_v3_metric_proto_init() { } } } - file_google_monitoring_v3_metric_proto_msgTypes[4].OneofWrappers = []interface{}{ + file_google_monitoring_v3_metric_proto_msgTypes[4].OneofWrappers = []any{ (*LabelValue_BoolValue)(nil), (*LabelValue_Int64Value)(nil), (*LabelValue_StringValue)(nil), diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go index 39d689b3e76..52e1c1e0b9b 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/metric_service.proto package monitoringpb @@ -1765,7 +1765,7 @@ func file_google_monitoring_v3_metric_service_proto_rawDescGZIP() []byte { var file_google_monitoring_v3_metric_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_google_monitoring_v3_metric_service_proto_msgTypes = make([]protoimpl.MessageInfo, 17) -var file_google_monitoring_v3_metric_service_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_metric_service_proto_goTypes = []any{ (ListTimeSeriesRequest_TimeSeriesView)(0), // 0: google.monitoring.v3.ListTimeSeriesRequest.TimeSeriesView (*ListMonitoredResourceDescriptorsRequest)(nil), // 1: google.monitoring.v3.ListMonitoredResourceDescriptorsRequest (*ListMonitoredResourceDescriptorsResponse)(nil), // 2: google.monitoring.v3.ListMonitoredResourceDescriptorsResponse @@ -1847,7 +1847,7 @@ func file_google_monitoring_v3_metric_service_proto_init() { file_google_monitoring_v3_common_proto_init() file_google_monitoring_v3_metric_proto_init() if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_metric_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_service_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*ListMonitoredResourceDescriptorsRequest); i { case 0: return &v.state @@ -1859,7 +1859,7 @@ func file_google_monitoring_v3_metric_service_proto_init() { return nil } } - file_google_monitoring_v3_metric_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_service_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ListMonitoredResourceDescriptorsResponse); i { case 0: return &v.state @@ -1871,7 +1871,7 @@ func file_google_monitoring_v3_metric_service_proto_init() { return nil } } - file_google_monitoring_v3_metric_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_service_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*GetMonitoredResourceDescriptorRequest); i { case 0: return &v.state @@ -1883,7 +1883,7 @@ func file_google_monitoring_v3_metric_service_proto_init() { return nil } } - file_google_monitoring_v3_metric_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_service_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*ListMetricDescriptorsRequest); i { case 0: return &v.state @@ -1895,7 +1895,7 @@ func file_google_monitoring_v3_metric_service_proto_init() { return nil } } - file_google_monitoring_v3_metric_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_service_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*ListMetricDescriptorsResponse); i { case 0: return &v.state @@ -1907,7 +1907,7 @@ func file_google_monitoring_v3_metric_service_proto_init() { return nil } } - file_google_monitoring_v3_metric_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_service_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*GetMetricDescriptorRequest); i { case 0: return &v.state @@ -1919,7 +1919,7 @@ func file_google_monitoring_v3_metric_service_proto_init() { return nil } } - file_google_monitoring_v3_metric_service_proto_msgTypes[6].Exporter 
= func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_service_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*CreateMetricDescriptorRequest); i { case 0: return &v.state @@ -1931,7 +1931,7 @@ func file_google_monitoring_v3_metric_service_proto_init() { return nil } } - file_google_monitoring_v3_metric_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_service_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*DeleteMetricDescriptorRequest); i { case 0: return &v.state @@ -1943,7 +1943,7 @@ func file_google_monitoring_v3_metric_service_proto_init() { return nil } } - file_google_monitoring_v3_metric_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_service_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*ListTimeSeriesRequest); i { case 0: return &v.state @@ -1955,7 +1955,7 @@ func file_google_monitoring_v3_metric_service_proto_init() { return nil } } - file_google_monitoring_v3_metric_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_service_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*ListTimeSeriesResponse); i { case 0: return &v.state @@ -1967,7 +1967,7 @@ func file_google_monitoring_v3_metric_service_proto_init() { return nil } } - file_google_monitoring_v3_metric_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_service_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*CreateTimeSeriesRequest); i { case 0: return &v.state @@ -1979,7 +1979,7 @@ func file_google_monitoring_v3_metric_service_proto_init() { return nil } } - file_google_monitoring_v3_metric_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_service_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*CreateTimeSeriesError); i { case 0: return &v.state @@ -1991,7 +1991,7 @@ func file_google_monitoring_v3_metric_service_proto_init() { return nil } } - file_google_monitoring_v3_metric_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_service_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*CreateTimeSeriesSummary); i { case 0: return &v.state @@ -2003,7 +2003,7 @@ func file_google_monitoring_v3_metric_service_proto_init() { return nil } } - file_google_monitoring_v3_metric_service_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_service_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*QueryTimeSeriesRequest); i { case 0: return &v.state @@ -2015,7 +2015,7 @@ func file_google_monitoring_v3_metric_service_proto_init() { return nil } } - file_google_monitoring_v3_metric_service_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_service_proto_msgTypes[14].Exporter = func(v any, i int) any { switch v := v.(*QueryTimeSeriesResponse); i { case 0: return &v.state @@ -2027,7 +2027,7 @@ func file_google_monitoring_v3_metric_service_proto_init() { return nil } } - file_google_monitoring_v3_metric_service_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_service_proto_msgTypes[15].Exporter = func(v any, i int) 
any { switch v := v.(*QueryErrorList); i { case 0: return &v.state @@ -2039,7 +2039,7 @@ func file_google_monitoring_v3_metric_service_proto_init() { return nil } } - file_google_monitoring_v3_metric_service_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_metric_service_proto_msgTypes[16].Exporter = func(v any, i int) any { switch v := v.(*CreateTimeSeriesSummary_Error); i { case 0: return &v.state diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go index df96482951d..643b244e4d3 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/mutation_record.proto package monitoringpb @@ -139,7 +139,7 @@ func file_google_monitoring_v3_mutation_record_proto_rawDescGZIP() []byte { } var file_google_monitoring_v3_mutation_record_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_monitoring_v3_mutation_record_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_mutation_record_proto_goTypes = []any{ (*MutationRecord)(nil), // 0: google.monitoring.v3.MutationRecord (*timestamppb.Timestamp)(nil), // 1: google.protobuf.Timestamp } @@ -158,7 +158,7 @@ func file_google_monitoring_v3_mutation_record_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_mutation_record_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_mutation_record_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*MutationRecord); i { case 0: return &v.state diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go index ac5e389cb7e..603b5bcdde1 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/notification.proto package monitoringpb @@ -563,7 +563,7 @@ func file_google_monitoring_v3_notification_proto_rawDescGZIP() []byte { var file_google_monitoring_v3_notification_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_google_monitoring_v3_notification_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_google_monitoring_v3_notification_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_notification_proto_goTypes = []any{ (NotificationChannel_VerificationStatus)(0), // 0: google.monitoring.v3.NotificationChannel.VerificationStatus (*NotificationChannelDescriptor)(nil), // 1: google.monitoring.v3.NotificationChannelDescriptor (*NotificationChannel)(nil), // 2: google.monitoring.v3.NotificationChannel @@ -600,7 +600,7 @@ func file_google_monitoring_v3_notification_proto_init() { file_google_monitoring_v3_common_proto_init() file_google_monitoring_v3_mutation_record_proto_init() if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_notification_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_notification_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*NotificationChannelDescriptor); i { case 0: return &v.state @@ -612,7 +612,7 @@ func file_google_monitoring_v3_notification_proto_init() { return nil } } - file_google_monitoring_v3_notification_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_notification_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*NotificationChannel); i { case 0: return &v.state diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go index 11f568083da..ac7bafd1f10 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/notification_service.proto package monitoringpb @@ -1242,7 +1242,7 @@ func file_google_monitoring_v3_notification_service_proto_rawDescGZIP() []byte { } var file_google_monitoring_v3_notification_service_proto_msgTypes = make([]protoimpl.MessageInfo, 13) -var file_google_monitoring_v3_notification_service_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_notification_service_proto_goTypes = []any{ (*ListNotificationChannelDescriptorsRequest)(nil), // 0: google.monitoring.v3.ListNotificationChannelDescriptorsRequest (*ListNotificationChannelDescriptorsResponse)(nil), // 1: google.monitoring.v3.ListNotificationChannelDescriptorsResponse (*GetNotificationChannelDescriptorRequest)(nil), // 2: google.monitoring.v3.GetNotificationChannelDescriptorRequest @@ -1304,7 +1304,7 @@ func file_google_monitoring_v3_notification_service_proto_init() { } file_google_monitoring_v3_notification_proto_init() if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_notification_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_notification_service_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*ListNotificationChannelDescriptorsRequest); i { case 0: return &v.state @@ -1316,7 +1316,7 @@ func file_google_monitoring_v3_notification_service_proto_init() { return nil } } - file_google_monitoring_v3_notification_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_notification_service_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ListNotificationChannelDescriptorsResponse); i { case 0: return &v.state @@ -1328,7 +1328,7 @@ func file_google_monitoring_v3_notification_service_proto_init() { return nil } } - file_google_monitoring_v3_notification_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_notification_service_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*GetNotificationChannelDescriptorRequest); i { case 0: return &v.state @@ -1340,7 +1340,7 @@ func file_google_monitoring_v3_notification_service_proto_init() { return nil } } - file_google_monitoring_v3_notification_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_notification_service_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*CreateNotificationChannelRequest); i { case 0: return &v.state @@ -1352,7 +1352,7 @@ func file_google_monitoring_v3_notification_service_proto_init() { return nil } } - file_google_monitoring_v3_notification_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_notification_service_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*ListNotificationChannelsRequest); i { case 0: return &v.state @@ -1364,7 +1364,7 @@ func file_google_monitoring_v3_notification_service_proto_init() { return nil } } - file_google_monitoring_v3_notification_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_notification_service_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*ListNotificationChannelsResponse); i { case 0: return &v.state @@ -1376,7 +1376,7 @@ func file_google_monitoring_v3_notification_service_proto_init() { return nil } } - 
file_google_monitoring_v3_notification_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_notification_service_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*GetNotificationChannelRequest); i { case 0: return &v.state @@ -1388,7 +1388,7 @@ func file_google_monitoring_v3_notification_service_proto_init() { return nil } } - file_google_monitoring_v3_notification_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_notification_service_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*UpdateNotificationChannelRequest); i { case 0: return &v.state @@ -1400,7 +1400,7 @@ func file_google_monitoring_v3_notification_service_proto_init() { return nil } } - file_google_monitoring_v3_notification_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_notification_service_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*DeleteNotificationChannelRequest); i { case 0: return &v.state @@ -1412,7 +1412,7 @@ func file_google_monitoring_v3_notification_service_proto_init() { return nil } } - file_google_monitoring_v3_notification_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_notification_service_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*SendNotificationChannelVerificationCodeRequest); i { case 0: return &v.state @@ -1424,7 +1424,7 @@ func file_google_monitoring_v3_notification_service_proto_init() { return nil } } - file_google_monitoring_v3_notification_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_notification_service_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*GetNotificationChannelVerificationCodeRequest); i { case 0: return &v.state @@ -1436,7 +1436,7 @@ func file_google_monitoring_v3_notification_service_proto_init() { return nil } } - file_google_monitoring_v3_notification_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_notification_service_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*GetNotificationChannelVerificationCodeResponse); i { case 0: return &v.state @@ -1448,7 +1448,7 @@ func file_google_monitoring_v3_notification_service_proto_init() { return nil } } - file_google_monitoring_v3_notification_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_notification_service_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*VerifyNotificationChannelRequest); i { case 0: return &v.state diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go index 210b1509825..e9bfbd68f53 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/query_service.proto package monitoringpb @@ -90,7 +90,7 @@ var file_google_monitoring_v3_query_service_proto_rawDesc = []byte{ 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -var file_google_monitoring_v3_query_service_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_query_service_proto_goTypes = []any{ (*QueryTimeSeriesRequest)(nil), // 0: google.monitoring.v3.QueryTimeSeriesRequest (*QueryTimeSeriesResponse)(nil), // 1: google.monitoring.v3.QueryTimeSeriesResponse } diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go index 240d5725cb9..869a3738c09 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/service.proto package monitoringpb @@ -108,7 +108,7 @@ type Service struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Resource name for this Service. The format is: + // Identifier. Resource name for this Service. The format is: // // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -124,7 +124,16 @@ type Service struct { // *Service_ClusterIstio_ // *Service_MeshIstio_ // *Service_IstioCanonicalService_ + // *Service_CloudRun_ + // *Service_GkeNamespace_ + // *Service_GkeWorkload_ + // *Service_GkeService_ Identifier isService_Identifier `protobuf_oneof:"identifier"` + // Message that contains the service type and service labels of this service + // if it is a basic service. + // Documentation and examples + // [here](https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli). + BasicService *Service_BasicService `protobuf:"bytes,19,opt,name=basic_service,json=basicService,proto3" json:"basic_service,omitempty"` // Configuration for how to query telemetry on a Service. Telemetry *Service_Telemetry `protobuf:"bytes,13,opt,name=telemetry,proto3" json:"telemetry,omitempty"` // Labels which have been used to annotate the service. 
Label keys must start @@ -231,6 +240,41 @@ func (x *Service) GetIstioCanonicalService() *Service_IstioCanonicalService { return nil } +func (x *Service) GetCloudRun() *Service_CloudRun { + if x, ok := x.GetIdentifier().(*Service_CloudRun_); ok { + return x.CloudRun + } + return nil +} + +func (x *Service) GetGkeNamespace() *Service_GkeNamespace { + if x, ok := x.GetIdentifier().(*Service_GkeNamespace_); ok { + return x.GkeNamespace + } + return nil +} + +func (x *Service) GetGkeWorkload() *Service_GkeWorkload { + if x, ok := x.GetIdentifier().(*Service_GkeWorkload_); ok { + return x.GkeWorkload + } + return nil +} + +func (x *Service) GetGkeService() *Service_GkeService { + if x, ok := x.GetIdentifier().(*Service_GkeService_); ok { + return x.GkeService + } + return nil +} + +func (x *Service) GetBasicService() *Service_BasicService { + if x != nil { + return x.BasicService + } + return nil +} + func (x *Service) GetTelemetry() *Service_Telemetry { if x != nil { return x.Telemetry @@ -281,6 +325,26 @@ type Service_IstioCanonicalService_ struct { IstioCanonicalService *Service_IstioCanonicalService `protobuf:"bytes,11,opt,name=istio_canonical_service,json=istioCanonicalService,proto3,oneof"` } +type Service_CloudRun_ struct { + // Type used for Cloud Run services. + CloudRun *Service_CloudRun `protobuf:"bytes,12,opt,name=cloud_run,json=cloudRun,proto3,oneof"` +} + +type Service_GkeNamespace_ struct { + // Type used for GKE Namespaces. + GkeNamespace *Service_GkeNamespace `protobuf:"bytes,15,opt,name=gke_namespace,json=gkeNamespace,proto3,oneof"` +} + +type Service_GkeWorkload_ struct { + // Type used for GKE Workloads. + GkeWorkload *Service_GkeWorkload `protobuf:"bytes,16,opt,name=gke_workload,json=gkeWorkload,proto3,oneof"` +} + +type Service_GkeService_ struct { + // Type used for GKE Services (the Kubernetes concept of a service). + GkeService *Service_GkeService `protobuf:"bytes,17,opt,name=gke_service,json=gkeService,proto3,oneof"` +} + func (*Service_Custom_) isService_Identifier() {} func (*Service_AppEngine_) isService_Identifier() {} @@ -293,6 +357,14 @@ func (*Service_MeshIstio_) isService_Identifier() {} func (*Service_IstioCanonicalService_) isService_Identifier() {} +func (*Service_CloudRun_) isService_Identifier() {} + +func (*Service_GkeNamespace_) isService_Identifier() {} + +func (*Service_GkeWorkload_) isService_Identifier() {} + +func (*Service_GkeService_) isService_Identifier() {} + // A Service-Level Objective (SLO) describes a level of desired good service. It // consists of a service-level indicator (SLI), a performance goal, and a period // over which the objective is to be evaluated against that goal. The SLO can @@ -304,7 +376,7 @@ type ServiceLevelObjective struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Resource name for this `ServiceLevelObjective`. The format is: + // Identifier. Resource name for this `ServiceLevelObjective`. The format is: // // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -1117,8 +1189,9 @@ func (*WindowsBasedSli_MetricMeanInRange) isWindowsBasedSli_WindowCriterion() {} func (*WindowsBasedSli_MetricSumInRange) isWindowsBasedSli_WindowCriterion() {} -// Custom view of service telemetry. Currently a place-holder pending final -// design. 
+// Use a custom service to designate a service that you want to monitor +// when none of the other service types (like App Engine, Cloud Run, or +// a GKE type) matches your intended service. type Service_Custom struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1164,8 +1237,8 @@ type Service_AppEngine struct { unknownFields protoimpl.UnknownFields // The ID of the App Engine module underlying this service. Corresponds to - // the `module_id` resource label in the `gae_app` monitored resource: - // https://cloud.google.com/monitoring/api/resources#tag_gae_app + // the `module_id` resource label in the [`gae_app` monitored + // resource](https://cloud.google.com/monitoring/api/resources#tag_gae_app). ModuleId string `protobuf:"bytes,1,opt,name=module_id,json=moduleId,proto3" json:"module_id,omitempty"` } @@ -1215,8 +1288,8 @@ type Service_CloudEndpoints struct { unknownFields protoimpl.UnknownFields // The name of the Cloud Endpoints service underlying this service. - // Corresponds to the `service` resource label in the `api` monitored - // resource: https://cloud.google.com/monitoring/api/resources#tag_api + // Corresponds to the `service` resource label in the [`api` monitored + // resource](https://cloud.google.com/monitoring/api/resources#tag_api). Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` } @@ -1490,6 +1563,404 @@ func (x *Service_IstioCanonicalService) GetCanonicalService() string { return "" } +// Cloud Run service. Learn more at https://cloud.google.com/run. +type Service_CloudRun struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the Cloud Run service. Corresponds to the `service_name` + // resource label in the [`cloud_run_revision` monitored + // resource](https://cloud.google.com/monitoring/api/resources#tag_cloud_run_revision). + ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` + // The location the service is run. Corresponds to the `location` + // resource label in the [`cloud_run_revision` monitored + // resource](https://cloud.google.com/monitoring/api/resources#tag_cloud_run_revision). + Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` +} + +func (x *Service_CloudRun) Reset() { + *x = Service_CloudRun{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_CloudRun) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_CloudRun) ProtoMessage() {} + +func (x *Service_CloudRun) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_CloudRun.ProtoReflect.Descriptor instead. +func (*Service_CloudRun) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 6} +} + +func (x *Service_CloudRun) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +func (x *Service_CloudRun) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +// GKE Namespace. 
The field names correspond to the resource metadata labels +// on monitored resources that fall under a namespace (for example, +// `k8s_container` or `k8s_pod`). +type Service_GkeNamespace struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. The project this resource lives in. For legacy services + // migrated from the `Custom` type, this may be a distinct project from the + // one parenting the service itself. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` + // The location of the parent cluster. This may be a zone or region. + Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` + // The name of the parent cluster. + ClusterName string `protobuf:"bytes,3,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + // The name of this namespace. + NamespaceName string `protobuf:"bytes,4,opt,name=namespace_name,json=namespaceName,proto3" json:"namespace_name,omitempty"` +} + +func (x *Service_GkeNamespace) Reset() { + *x = Service_GkeNamespace{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_GkeNamespace) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_GkeNamespace) ProtoMessage() {} + +func (x *Service_GkeNamespace) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_GkeNamespace.ProtoReflect.Descriptor instead. +func (*Service_GkeNamespace) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 7} +} + +func (x *Service_GkeNamespace) GetProjectId() string { + if x != nil { + return x.ProjectId + } + return "" +} + +func (x *Service_GkeNamespace) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +func (x *Service_GkeNamespace) GetClusterName() string { + if x != nil { + return x.ClusterName + } + return "" +} + +func (x *Service_GkeNamespace) GetNamespaceName() string { + if x != nil { + return x.NamespaceName + } + return "" +} + +// A GKE Workload (Deployment, StatefulSet, etc). The field names correspond +// to the metadata labels on monitored resources that fall under a workload +// (for example, `k8s_container` or `k8s_pod`). +type Service_GkeWorkload struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. The project this resource lives in. For legacy services + // migrated from the `Custom` type, this may be a distinct project from the + // one parenting the service itself. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` + // The location of the parent cluster. This may be a zone or region. + Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` + // The name of the parent cluster. + ClusterName string `protobuf:"bytes,3,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + // The name of the parent namespace. 
+ NamespaceName string `protobuf:"bytes,4,opt,name=namespace_name,json=namespaceName,proto3" json:"namespace_name,omitempty"` + // The type of this workload (for example, "Deployment" or "DaemonSet") + TopLevelControllerType string `protobuf:"bytes,5,opt,name=top_level_controller_type,json=topLevelControllerType,proto3" json:"top_level_controller_type,omitempty"` + // The name of this workload. + TopLevelControllerName string `protobuf:"bytes,6,opt,name=top_level_controller_name,json=topLevelControllerName,proto3" json:"top_level_controller_name,omitempty"` +} + +func (x *Service_GkeWorkload) Reset() { + *x = Service_GkeWorkload{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_GkeWorkload) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_GkeWorkload) ProtoMessage() {} + +func (x *Service_GkeWorkload) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_GkeWorkload.ProtoReflect.Descriptor instead. +func (*Service_GkeWorkload) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 8} +} + +func (x *Service_GkeWorkload) GetProjectId() string { + if x != nil { + return x.ProjectId + } + return "" +} + +func (x *Service_GkeWorkload) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +func (x *Service_GkeWorkload) GetClusterName() string { + if x != nil { + return x.ClusterName + } + return "" +} + +func (x *Service_GkeWorkload) GetNamespaceName() string { + if x != nil { + return x.NamespaceName + } + return "" +} + +func (x *Service_GkeWorkload) GetTopLevelControllerType() string { + if x != nil { + return x.TopLevelControllerType + } + return "" +} + +func (x *Service_GkeWorkload) GetTopLevelControllerName() string { + if x != nil { + return x.TopLevelControllerName + } + return "" +} + +// GKE Service. The "service" here represents a +// [Kubernetes service +// object](https://kubernetes.io/docs/concepts/services-networking/service). +// The field names correspond to the resource labels on [`k8s_service` +// monitored +// resources](https://cloud.google.com/monitoring/api/resources#tag_k8s_service). +type Service_GkeService struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. The project this resource lives in. For legacy services + // migrated from the `Custom` type, this may be a distinct project from the + // one parenting the service itself. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` + // The location of the parent cluster. This may be a zone or region. + Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` + // The name of the parent cluster. + ClusterName string `protobuf:"bytes,3,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + // The name of the parent namespace. + NamespaceName string `protobuf:"bytes,4,opt,name=namespace_name,json=namespaceName,proto3" json:"namespace_name,omitempty"` + // The name of this service. 
+ ServiceName string `protobuf:"bytes,5,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` +} + +func (x *Service_GkeService) Reset() { + *x = Service_GkeService{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_GkeService) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_GkeService) ProtoMessage() {} + +func (x *Service_GkeService) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_GkeService.ProtoReflect.Descriptor instead. +func (*Service_GkeService) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 9} +} + +func (x *Service_GkeService) GetProjectId() string { + if x != nil { + return x.ProjectId + } + return "" +} + +func (x *Service_GkeService) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +func (x *Service_GkeService) GetClusterName() string { + if x != nil { + return x.ClusterName + } + return "" +} + +func (x *Service_GkeService) GetNamespaceName() string { + if x != nil { + return x.NamespaceName + } + return "" +} + +func (x *Service_GkeService) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +// A well-known service type, defined by its service type and service labels. +// Documentation and examples +// [here](https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli). +type Service_BasicService struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The type of service that this basic service defines, e.g. + // APP_ENGINE service type. + // Documentation and valid values + // [here](https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli). + ServiceType string `protobuf:"bytes,1,opt,name=service_type,json=serviceType,proto3" json:"service_type,omitempty"` + // Labels that specify the resource that emits the monitoring data which + // is used for SLO reporting of this `Service`. + // Documentation and valid values for given service types + // [here](https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli). 
+ ServiceLabels map[string]string `protobuf:"bytes,2,rep,name=service_labels,json=serviceLabels,proto3" json:"service_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Service_BasicService) Reset() { + *x = Service_BasicService{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_BasicService) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_BasicService) ProtoMessage() {} + +func (x *Service_BasicService) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_BasicService.ProtoReflect.Descriptor instead. +func (*Service_BasicService) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 10} +} + +func (x *Service_BasicService) GetServiceType() string { + if x != nil { + return x.ServiceType + } + return "" +} + +func (x *Service_BasicService) GetServiceLabels() map[string]string { + if x != nil { + return x.ServiceLabels + } + return nil +} + // Configuration for how to query telemetry on a Service. type Service_Telemetry struct { state protoimpl.MessageState @@ -1504,7 +1975,7 @@ type Service_Telemetry struct { func (x *Service_Telemetry) Reset() { *x = Service_Telemetry{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_proto_msgTypes[15] + mi := &file_google_monitoring_v3_service_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1517,7 +1988,7 @@ func (x *Service_Telemetry) String() string { func (*Service_Telemetry) ProtoMessage() {} func (x *Service_Telemetry) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_proto_msgTypes[15] + mi := &file_google_monitoring_v3_service_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1530,7 +2001,7 @@ func (x *Service_Telemetry) ProtoReflect() protoreflect.Message { // Deprecated: Use Service_Telemetry.ProtoReflect.Descriptor instead. 
func (*Service_Telemetry) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 6} + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 11} } func (x *Service_Telemetry) GetResourceName() string { @@ -1550,7 +2021,7 @@ type BasicSli_AvailabilityCriteria struct { func (x *BasicSli_AvailabilityCriteria) Reset() { *x = BasicSli_AvailabilityCriteria{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_proto_msgTypes[18] + mi := &file_google_monitoring_v3_service_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1563,7 +2034,7 @@ func (x *BasicSli_AvailabilityCriteria) String() string { func (*BasicSli_AvailabilityCriteria) ProtoMessage() {} func (x *BasicSli_AvailabilityCriteria) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_proto_msgTypes[18] + mi := &file_google_monitoring_v3_service_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1593,7 +2064,7 @@ type BasicSli_LatencyCriteria struct { func (x *BasicSli_LatencyCriteria) Reset() { *x = BasicSli_LatencyCriteria{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_proto_msgTypes[19] + mi := &file_google_monitoring_v3_service_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1606,7 +2077,7 @@ func (x *BasicSli_LatencyCriteria) String() string { func (*BasicSli_LatencyCriteria) ProtoMessage() {} func (x *BasicSli_LatencyCriteria) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_proto_msgTypes[19] + mi := &file_google_monitoring_v3_service_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1651,7 +2122,7 @@ type WindowsBasedSli_PerformanceThreshold struct { func (x *WindowsBasedSli_PerformanceThreshold) Reset() { *x = WindowsBasedSli_PerformanceThreshold{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_proto_msgTypes[20] + mi := &file_google_monitoring_v3_service_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1664,7 +2135,7 @@ func (x *WindowsBasedSli_PerformanceThreshold) String() string { func (*WindowsBasedSli_PerformanceThreshold) ProtoMessage() {} func (x *WindowsBasedSli_PerformanceThreshold) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_proto_msgTypes[20] + mi := &file_google_monitoring_v3_service_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1748,7 +2219,7 @@ type WindowsBasedSli_MetricRange struct { func (x *WindowsBasedSli_MetricRange) Reset() { *x = WindowsBasedSli_MetricRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_service_proto_msgTypes[21] + mi := &file_google_monitoring_v3_service_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1761,7 +2232,7 @@ func (x *WindowsBasedSli_MetricRange) String() string { func (*WindowsBasedSli_MetricRange) ProtoMessage() {} func (x *WindowsBasedSli_MetricRange) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_service_proto_msgTypes[21] + mi := &file_google_monitoring_v3_service_proto_msgTypes[27] 
if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1797,302 +2268,386 @@ var file_google_monitoring_v3_service_proto_rawDesc = []byte{ 0x0a, 0x22, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, - 0x70, 0x65, 0x2f, 0x63, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x5f, 0x70, 0x65, 0x72, 0x69, - 0x6f, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x89, 0x0c, 0x0a, 0x07, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, - 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x63, - 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, + 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x2f, 0x63, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x5f, 0x70, 0x65, 0x72, + 0x69, 0x6f, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa4, 0x16, 0x0a, 0x07, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x08, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x00, 0x52, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x12, 0x48, 0x0a, 0x0a, 0x61, 0x70, 0x70, 0x5f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, + 0x6f, 
0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x41, 0x70, 0x70, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x48, 0x00, + 0x52, 0x09, 0x61, 0x70, 0x70, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x57, 0x0a, 0x0f, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x73, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x73, 0x12, 0x51, 0x0a, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x69, 0x73, 0x74, 0x69, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x12, 0x48, 0x0a, 0x0a, 0x6d, 0x65, 0x73, 0x68, 0x5f, + 0x69, 0x73, 0x74, 0x69, 0x6f, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x48, 0x00, 0x52, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x12, 0x48, 0x0a, 0x0a, 0x61, - 0x70, 0x70, 0x5f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x41, - 0x70, 0x70, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x48, 0x00, 0x52, 0x09, 0x61, 0x70, 0x70, 0x45, - 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x57, 0x0a, 0x0f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x65, - 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, + 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4d, 0x65, 0x73, 0x68, 0x49, + 0x73, 0x74, 0x69, 0x6f, 0x48, 0x00, 0x52, 0x09, 0x6d, 0x65, 0x73, 0x68, 0x49, 0x73, 0x74, 0x69, + 0x6f, 0x12, 0x6d, 0x0a, 0x17, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x5f, 0x63, 0x61, 0x6e, 0x6f, 0x6e, + 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x43, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x00, 0x52, 0x15, 0x69, 0x73, 0x74, 0x69, 0x6f, + 0x43, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x12, 0x45, 0x0a, 0x09, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x43, 0x6c, 
0x6f, 0x75, 0x64, 0x52, 0x75, 0x6e, 0x48, 0x00, 0x52, 0x08, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x52, 0x75, 0x6e, 0x12, 0x51, 0x0a, 0x0d, 0x67, 0x6b, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, - 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x6c, - 0x6f, 0x75, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x48, 0x00, 0x52, 0x0e, - 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x51, - 0x0a, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, - 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x73, 0x74, 0x69, - 0x6f, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x73, 0x74, 0x69, - 0x6f, 0x12, 0x48, 0x0a, 0x0a, 0x6d, 0x65, 0x73, 0x68, 0x5f, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, - 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4d, 0x65, 0x73, 0x68, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x48, 0x00, - 0x52, 0x09, 0x6d, 0x65, 0x73, 0x68, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x12, 0x6d, 0x0a, 0x17, 0x69, - 0x73, 0x74, 0x69, 0x6f, 0x5f, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, - 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x49, 0x73, 0x74, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6b, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x67, 0x6b, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x4e, 0x0a, 0x0c, 0x67, 0x6b, + 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x47, 0x6b, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x48, 0x00, 0x52, 0x0b, 0x67, + 0x6b, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x4b, 0x0a, 0x0b, 0x67, 0x6b, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, + 0x6b, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0a, 0x67, 0x6b, 0x65, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x4f, 0x0a, 0x0d, 0x62, 0x61, 0x73, 0x69, 0x63, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 
0x76, 0x69, 0x63, 0x65, 0x2e, 0x42, 0x61, + 0x73, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x69, + 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x09, 0x74, 0x65, 0x6c, 0x65, + 0x6d, 0x65, 0x74, 0x72, 0x79, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x54, 0x65, 0x6c, 0x65, 0x6d, + 0x65, 0x74, 0x72, 0x79, 0x52, 0x09, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x12, + 0x4e, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0e, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, + 0x08, 0x0a, 0x06, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x1a, 0x28, 0x0a, 0x09, 0x41, 0x70, 0x70, + 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6d, 0x6f, 0x64, 0x75, 0x6c, + 0x65, 0x49, 0x64, 0x1a, 0x2a, 0x0a, 0x0e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, + 0x9d, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x73, 0x74, 0x69, 0x6f, + 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x2b, 0x0a, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, + 0x76, 0x0a, 0x09, 0x4d, 0x65, 0x73, 0x68, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x12, 0x19, 0x0a, 0x08, + 0x6d, 0x65, 0x73, 0x68, 0x5f, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x6d, 0x65, 0x73, 0x68, 0x55, 0x69, 0x64, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x10, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x9f, 0x01, 0x0a, 0x15, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x43, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 
0x76, 0x69, 0x63, - 0x65, 0x48, 0x00, 0x52, 0x15, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x43, 0x61, 0x6e, 0x6f, 0x6e, 0x69, - 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x09, 0x74, 0x65, - 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x54, 0x65, 0x6c, - 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x52, 0x09, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, - 0x79, 0x12, 0x4e, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, - 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, - 0x73, 0x1a, 0x08, 0x0a, 0x06, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x1a, 0x28, 0x0a, 0x09, 0x41, - 0x70, 0x70, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x6f, 0x64, 0x75, - 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6d, 0x6f, 0x64, - 0x75, 0x6c, 0x65, 0x49, 0x64, 0x1a, 0x2a, 0x0a, 0x0e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x6e, - 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x1a, 0x9d, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x73, 0x74, - 0x69, 0x6f, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x68, 0x5f, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x68, 0x55, 0x69, 0x64, 0x12, 0x3e, 0x0a, 0x1b, + 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x19, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x2b, 0x0a, 0x11, + 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, + 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0x49, 0x0a, 0x08, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x52, 0x75, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x98, 0x01, 0x0a, 0x0c, 0x47, 0x6b, 0x65, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, + 0x61, 
0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, + 0x8d, 0x02, 0x0a, 0x0b, 0x47, 0x6b, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x12, + 0x22, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x19, 0x74, 0x6f, 0x70, + 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, + 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x74, 0x6f, + 0x70, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x19, 0x74, 0x6f, 0x70, 0x5f, 0x6c, 0x65, 0x76, 0x65, + 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x74, 0x6f, 0x70, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x1a, + 0xb9, 0x01, 0x0a, 0x0a, 0x47, 0x6b, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x22, + 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, - 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, - 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, - 0x65, 0x1a, 0x76, 0x0a, 0x09, 0x4d, 
0x65, 0x73, 0x68, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x12, 0x19, - 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x68, 0x5f, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x6d, 0x65, 0x73, 0x68, 0x55, 0x69, 0x64, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x9f, 0x01, 0x0a, 0x15, 0x49, 0x73, - 0x74, 0x69, 0x6f, 0x43, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x68, 0x5f, 0x75, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x68, 0x55, 0x69, 0x64, 0x12, 0x3e, - 0x0a, 0x1b, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x19, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x2b, - 0x0a, 0x11, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x61, 0x6e, 0x6f, 0x6e, - 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0x30, 0x0a, 0x09, 0x54, - 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3d, 0x0a, - 0x0f, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0xa7, 0x01, 0xea, - 0x41, 0xa3, 0x01, 0x0a, 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x25, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x12, 0x2f, 0x6f, - 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, - 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x12, 0x23, - 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, - 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x7d, 0x12, 0x01, 0x2a, 0x42, 0x0c, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x66, 0x69, 0x65, 0x72, 0x22, 0xfd, 0x06, 0x0a, 0x15, 0x53, 
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, - 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x63, 0x0a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x69, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x61, - 0x74, 0x6f, 0x72, 0x52, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, - 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x6f, - 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x04, 0x67, 0x6f, 0x61, 0x6c, 0x12, 0x42, - 0x0a, 0x0e, 0x72, 0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x50, 0x65, 0x72, 0x69, - 0x6f, 0x64, 0x12, 0x46, 0x0a, 0x0f, 0x63, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x5f, 0x70, - 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x43, 0x61, 0x6c, 0x65, 0x6e, 0x64, - 0x61, 0x72, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x61, 0x6c, 0x65, - 0x6e, 0x64, 0x61, 0x72, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x5c, 0x0a, 0x0b, 0x75, 0x73, - 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, - 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x55, 0x73, 0x65, - 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75, 0x73, - 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65, 0x72, + 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0xd9, 0x01, 0x0a, 0x0c, + 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x64, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x61, 0x62, 
0x65, 0x6c, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x40, 0x0a, 0x12, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x34, 0x0a, 0x04, 0x56, 0x69, 0x65, 0x77, 0x12, - 0x14, 0x0a, 0x10, 0x56, 0x49, 0x45, 0x57, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, - 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x46, 0x55, 0x4c, 0x4c, 0x10, 0x02, 0x12, - 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, 0x01, 0x3a, 0xca, 0x02, - 0xea, 0x41, 0xc6, 0x02, 0x0a, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x56, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x2f, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, - 0x76, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, - 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x7d, 0x12, 0x60, 0x6f, - 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, - 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x2f, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, - 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x7d, 0x12, - 0x54, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, - 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, - 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x69, 0x76, 0x65, 0x7d, 0x12, 0x01, 0x2a, 0x20, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x70, 0x65, - 0x72, 0x69, 0x6f, 0x64, 0x22, 0xfa, 0x01, 0x0a, 0x15, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x3d, - 0x0a, 0x09, 0x62, 
0x61, 0x73, 0x69, 0x63, 0x5f, 0x73, 0x6c, 0x69, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, - 0x69, 0x48, 0x00, 0x52, 0x08, 0x62, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x12, 0x4c, 0x0a, - 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, - 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x0c, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x12, 0x4c, 0x0a, 0x0d, 0x77, - 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, - 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x0c, 0x77, 0x69, 0x6e, - 0x64, 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x42, 0x06, 0x0a, 0x04, 0x74, 0x79, 0x70, - 0x65, 0x22, 0xf3, 0x02, 0x0a, 0x08, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x12, 0x16, - 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, - 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0c, - 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x30, 0x0a, 0x09, 0x54, 0x65, 0x6c, 0x65, 0x6d, + 0x65, 0x74, 0x72, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65, + 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0xa7, 0x01, 0xea, 0x41, 0xa3, 0x01, 0x0a, + 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x12, 0x25, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x12, 0x2f, 0x6f, 0x72, 0x67, 0x61, 0x6e, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 
0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x12, 0x23, 0x66, 0x6f, 0x6c, 0x64, + 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x12, + 0x01, 0x2a, 0x42, 0x0c, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x22, 0x82, 0x07, 0x0a, 0x15, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x08, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, + 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x63, 0x0a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x69, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, + 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, + 0x61, 0x74, 0x6f, 0x72, 0x52, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x67, + 0x6f, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x04, 0x67, 0x6f, 0x61, 0x6c, 0x12, + 0x42, 0x0a, 0x0e, 0x72, 0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, + 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x50, 0x65, 0x72, + 0x69, 0x6f, 0x64, 0x12, 0x46, 0x0a, 0x0f, 0x63, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x5f, + 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x43, 0x61, 0x6c, 0x65, 0x6e, + 0x64, 0x61, 0x72, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x61, 0x6c, + 0x65, 0x6e, 0x64, 0x61, 0x72, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x5c, 0x0a, 0x0b, 0x75, + 0x73, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, + 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x55, 0x73, + 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75, + 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65, + 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x34, 0x0a, 
0x04, 0x56, 0x69, 0x65, 0x77, + 0x12, 0x14, 0x0a, 0x10, 0x56, 0x49, 0x45, 0x57, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x46, 0x55, 0x4c, 0x4c, 0x10, 0x02, + 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, 0x01, 0x3a, 0xca, + 0x02, 0xea, 0x41, 0xc6, 0x02, 0x0a, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x56, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x2f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x69, 0x76, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, + 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x7d, 0x12, 0x60, + 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, + 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x2f, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x7d, + 0x12, 0x54, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, + 0x72, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x7d, 0x12, 0x01, 0x2a, 0x20, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x70, + 0x65, 0x72, 0x69, 0x6f, 0x64, 0x22, 0xfa, 0x01, 0x0a, 0x15, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, + 0x3d, 0x0a, 0x09, 0x62, 0x61, 0x73, 0x69, 0x63, 0x5f, 0x73, 0x6c, 0x69, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, - 0x6c, 0x69, 0x2e, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x43, - 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x48, 0x00, 0x52, 0x0c, 0x61, 0x76, 0x61, 0x69, 0x6c, - 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x4a, 0x0a, 0x07, 0x6c, 0x61, 0x74, 0x65, 0x6e, - 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, - 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x2e, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, - 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x48, 0x00, 0x52, 0x07, 0x6c, 0x61, 0x74, 
0x65, - 0x6e, 0x63, 0x79, 0x1a, 0x16, 0x0a, 0x14, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, - 0x69, 0x74, 0x79, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x1a, 0x4a, 0x0a, 0x0f, 0x4c, - 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x12, 0x37, - 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x74, 0x68, - 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x42, 0x0e, 0x0a, 0x0c, 0x73, 0x6c, 0x69, 0x5f, 0x63, - 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x22, 0x2b, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, 0x6d, - 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, - 0x03, 0x6d, 0x61, 0x78, 0x22, 0xc2, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x12, 0x51, 0x0a, 0x10, 0x67, 0x6f, 0x6f, 0x64, - 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, - 0x72, 0x69, 0x65, 0x73, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x48, 0x00, 0x52, 0x0e, 0x67, 0x6f, 0x6f, - 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x52, 0x0a, 0x10, 0x64, - 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x75, 0x74, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, - 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x69, 0x73, - 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x75, 0x74, 0x48, 0x00, 0x52, 0x0f, - 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x75, 0x74, 0x42, - 0x08, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x22, 0xa1, 0x01, 0x0a, 0x0f, 0x54, 0x69, - 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x2e, 0x0a, - 0x13, 0x67, 0x6f, 0x6f, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x66, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x67, 0x6f, 0x6f, 0x64, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x2c, 0x0a, - 0x12, 0x62, 0x61, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x62, 0x61, 0x64, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x30, 0x0a, 0x14, 0x74, - 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x75, 0x0a, - 0x0f, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x75, 0x74, - 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, - 0x69, 0x73, 0x74, 
0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, - 0x72, 0x12, 0x31, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, 0x72, - 0x61, 0x6e, 0x67, 0x65, 0x22, 0xa4, 0x06, 0x0a, 0x0f, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, - 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x12, 0x35, 0x0a, 0x16, 0x67, 0x6f, 0x6f, 0x64, - 0x5f, 0x62, 0x61, 0x64, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x66, 0x69, 0x6c, 0x74, - 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x13, 0x67, 0x6f, 0x6f, 0x64, - 0x42, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, - 0x79, 0x0a, 0x1a, 0x67, 0x6f, 0x6f, 0x64, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x6c, 0x69, 0x48, 0x00, 0x52, 0x08, 0x62, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x12, 0x4c, + 0x0a, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x0c, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x12, 0x4c, 0x0a, 0x0d, + 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x69, 0x6e, 0x64, 0x6f, - 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x2e, 0x50, 0x65, 0x72, 0x66, 0x6f, - 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x48, - 0x00, 0x52, 0x17, 0x67, 0x6f, 0x6f, 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x52, 0x61, 0x74, 0x69, - 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x64, 0x0a, 0x14, 0x6d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x5f, 0x6d, 0x65, 0x61, 0x6e, 0x5f, 0x69, 0x6e, 0x5f, 0x72, 0x61, 0x6e, - 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x0c, 0x77, 0x69, + 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x42, 0x06, 0x0a, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x22, 0xf3, 0x02, 0x0a, 0x08, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x12, + 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, + 0x0c, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, + 0x53, 0x6c, 0x69, 0x2e, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, + 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x48, 0x00, 0x52, 0x0c, 0x61, 0x76, 0x61, 0x69, + 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x4a, 0x0a, 0x07, 0x6c, 0x61, 0x74, 0x65, + 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x2e, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, + 0x79, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x48, 0x00, 0x52, 0x07, 0x6c, 0x61, 0x74, + 0x65, 0x6e, 0x63, 0x79, 0x1a, 0x16, 0x0a, 0x14, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, + 0x6c, 0x69, 0x74, 0x79, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x1a, 0x4a, 0x0a, 0x0f, + 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x12, + 0x37, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x74, + 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x42, 0x0e, 0x0a, 0x0c, 0x73, 0x6c, 0x69, 0x5f, + 0x63, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x22, 0x2b, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, + 0x6d, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x03, 0x6d, 0x61, 0x78, 0x22, 0xc2, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x12, 0x51, 0x0a, 0x10, 0x67, 0x6f, 0x6f, + 0x64, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x48, 0x00, 0x52, 0x0e, 0x67, 0x6f, + 0x6f, 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x52, 0x0a, 0x10, + 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x75, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x69, + 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x75, 0x74, 0x48, 0x00, 0x52, + 0x0f, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x75, 0x74, + 0x42, 0x08, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x22, 0xa1, 0x01, 0x0a, 0x0f, 0x54, + 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x2e, + 0x0a, 0x13, 0x67, 0x6f, 0x6f, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x67, 0x6f, 0x6f, + 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x2c, + 0x0a, 0x12, 0x62, 0x61, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 
0x63, 0x65, 0x5f, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x62, 0x61, 0x64, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x30, 0x0a, 0x14, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x74, 0x6f, 0x74, 0x61, + 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x75, + 0x0a, 0x0f, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x75, + 0x74, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, + 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x12, 0x31, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, + 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0xa4, 0x06, 0x0a, 0x0f, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, + 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x12, 0x35, 0x0a, 0x16, 0x67, 0x6f, 0x6f, + 0x64, 0x5f, 0x62, 0x61, 0x64, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x13, 0x67, 0x6f, 0x6f, + 0x64, 0x42, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x12, 0x79, 0x0a, 0x1a, 0x67, 0x6f, 0x6f, 0x64, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x69, 0x6e, 0x64, + 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x2e, 0x50, 0x65, 0x72, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, + 0x48, 0x00, 0x52, 0x17, 0x67, 0x6f, 0x6f, 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x52, 0x61, 0x74, + 0x69, 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x64, 0x0a, 0x14, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x6d, 0x65, 0x61, 0x6e, 0x5f, 0x69, 0x6e, 0x5f, 0x72, 0x61, + 0x6e, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, + 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x11, + 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4d, 0x65, 0x61, 0x6e, 0x49, 0x6e, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x12, 0x62, 0x0a, 0x13, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x75, 0x6d, 0x5f, + 0x69, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, + 0x65, 0x64, 0x53, 0x6c, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x61, 0x6e, 
0x67, + 0x65, 0x48, 0x00, 0x52, 0x10, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x75, 0x6d, 0x49, 0x6e, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x3e, 0x0a, 0x0d, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, + 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x50, + 0x65, 0x72, 0x69, 0x6f, 0x64, 0x1a, 0xdd, 0x01, 0x0a, 0x14, 0x50, 0x65, 0x72, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x49, + 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x0b, 0x70, 0x65, + 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x54, 0x0a, 0x15, 0x62, 0x61, 0x73, + 0x69, 0x63, 0x5f, 0x73, 0x6c, 0x69, 0x5f, 0x70, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, + 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, - 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x2e, - 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x11, 0x6d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x4d, 0x65, 0x61, 0x6e, 0x49, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x12, 0x62, 0x0a, 0x13, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x75, 0x6d, 0x5f, 0x69, - 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, - 0x64, 0x53, 0x6c, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x48, 0x00, 0x52, 0x10, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x75, 0x6d, 0x49, 0x6e, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x12, 0x3e, 0x0a, 0x0d, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x70, - 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x50, 0x65, - 0x72, 0x69, 0x6f, 0x64, 0x1a, 0xdd, 0x01, 0x0a, 0x14, 0x50, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, - 0x61, 0x6e, 0x63, 0x65, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x49, 0x0a, - 0x0b, 0x70, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x0b, 0x70, 0x65, 0x72, - 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x54, 0x0a, 0x15, 0x62, 0x61, 0x73, 0x69, - 0x63, 0x5f, 0x73, 0x6c, 0x69, 0x5f, 0x70, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, - 0x65, 0x18, 0x03, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x42, - 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x13, 0x62, 0x61, 0x73, 0x69, 0x63, - 0x53, 0x6c, 0x69, 0x50, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x1c, - 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x01, 0x52, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x42, 0x06, 0x0a, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x1a, 0x61, 0x0a, 0x0b, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, - 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, - 0x72, 0x69, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x12, 0x0a, 0x10, 0x77, 0x69, 0x6e, 0x64, 0x6f, - 0x77, 0x5f, 0x63, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x6f, 0x6e, 0x42, 0xd1, 0x01, 0x0a, 0x18, - 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, - 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, - 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, - 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, - 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, - 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, - 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, - 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x13, 0x62, 0x61, 0x73, 0x69, + 0x63, 0x53, 0x6c, 0x69, 0x50, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x12, + 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x01, 0x52, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x42, 0x06, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x1a, 0x61, 0x0a, 0x0b, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, + 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x53, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 
0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x12, 0x0a, 0x10, 0x77, 0x69, 0x6e, 0x64, + 0x6f, 0x77, 0x5f, 0x63, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x6f, 0x6e, 0x42, 0xd1, 0x01, 0x0a, + 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, + 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, + 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2108,8 +2663,8 @@ func file_google_monitoring_v3_service_proto_rawDescGZIP() []byte { } var file_google_monitoring_v3_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_google_monitoring_v3_service_proto_msgTypes = make([]protoimpl.MessageInfo, 22) -var file_google_monitoring_v3_service_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_service_proto_msgTypes = make([]protoimpl.MessageInfo, 28) +var file_google_monitoring_v3_service_proto_goTypes = []any{ (ServiceLevelObjective_View)(0), // 0: google.monitoring.v3.ServiceLevelObjective.View (*Service)(nil), // 1: google.monitoring.v3.Service (*ServiceLevelObjective)(nil), // 2: google.monitoring.v3.ServiceLevelObjective @@ -2126,15 +2681,21 @@ var file_google_monitoring_v3_service_proto_goTypes = []interface{}{ (*Service_ClusterIstio)(nil), // 13: google.monitoring.v3.Service.ClusterIstio (*Service_MeshIstio)(nil), // 14: google.monitoring.v3.Service.MeshIstio (*Service_IstioCanonicalService)(nil), // 15: google.monitoring.v3.Service.IstioCanonicalService - (*Service_Telemetry)(nil), // 16: google.monitoring.v3.Service.Telemetry - nil, // 17: google.monitoring.v3.Service.UserLabelsEntry - nil, // 18: google.monitoring.v3.ServiceLevelObjective.UserLabelsEntry - (*BasicSli_AvailabilityCriteria)(nil), // 19: google.monitoring.v3.BasicSli.AvailabilityCriteria - (*BasicSli_LatencyCriteria)(nil), // 20: google.monitoring.v3.BasicSli.LatencyCriteria - (*WindowsBasedSli_PerformanceThreshold)(nil), // 21: google.monitoring.v3.WindowsBasedSli.PerformanceThreshold - (*WindowsBasedSli_MetricRange)(nil), // 22: google.monitoring.v3.WindowsBasedSli.MetricRange - (*durationpb.Duration)(nil), // 23: google.protobuf.Duration - (calendarperiod.CalendarPeriod)(0), // 24: google.type.CalendarPeriod + (*Service_CloudRun)(nil), // 16: google.monitoring.v3.Service.CloudRun + (*Service_GkeNamespace)(nil), // 17: 
google.monitoring.v3.Service.GkeNamespace + (*Service_GkeWorkload)(nil), // 18: google.monitoring.v3.Service.GkeWorkload + (*Service_GkeService)(nil), // 19: google.monitoring.v3.Service.GkeService + (*Service_BasicService)(nil), // 20: google.monitoring.v3.Service.BasicService + (*Service_Telemetry)(nil), // 21: google.monitoring.v3.Service.Telemetry + nil, // 22: google.monitoring.v3.Service.UserLabelsEntry + nil, // 23: google.monitoring.v3.Service.BasicService.ServiceLabelsEntry + nil, // 24: google.monitoring.v3.ServiceLevelObjective.UserLabelsEntry + (*BasicSli_AvailabilityCriteria)(nil), // 25: google.monitoring.v3.BasicSli.AvailabilityCriteria + (*BasicSli_LatencyCriteria)(nil), // 26: google.monitoring.v3.BasicSli.LatencyCriteria + (*WindowsBasedSli_PerformanceThreshold)(nil), // 27: google.monitoring.v3.WindowsBasedSli.PerformanceThreshold + (*WindowsBasedSli_MetricRange)(nil), // 28: google.monitoring.v3.WindowsBasedSli.MetricRange + (*durationpb.Duration)(nil), // 29: google.protobuf.Duration + (calendarperiod.CalendarPeriod)(0), // 30: google.type.CalendarPeriod } var file_google_monitoring_v3_service_proto_depIdxs = []int32{ 10, // 0: google.monitoring.v3.Service.custom:type_name -> google.monitoring.v3.Service.Custom @@ -2143,33 +2704,39 @@ var file_google_monitoring_v3_service_proto_depIdxs = []int32{ 13, // 3: google.monitoring.v3.Service.cluster_istio:type_name -> google.monitoring.v3.Service.ClusterIstio 14, // 4: google.monitoring.v3.Service.mesh_istio:type_name -> google.monitoring.v3.Service.MeshIstio 15, // 5: google.monitoring.v3.Service.istio_canonical_service:type_name -> google.monitoring.v3.Service.IstioCanonicalService - 16, // 6: google.monitoring.v3.Service.telemetry:type_name -> google.monitoring.v3.Service.Telemetry - 17, // 7: google.monitoring.v3.Service.user_labels:type_name -> google.monitoring.v3.Service.UserLabelsEntry - 3, // 8: google.monitoring.v3.ServiceLevelObjective.service_level_indicator:type_name -> google.monitoring.v3.ServiceLevelIndicator - 23, // 9: google.monitoring.v3.ServiceLevelObjective.rolling_period:type_name -> google.protobuf.Duration - 24, // 10: google.monitoring.v3.ServiceLevelObjective.calendar_period:type_name -> google.type.CalendarPeriod - 18, // 11: google.monitoring.v3.ServiceLevelObjective.user_labels:type_name -> google.monitoring.v3.ServiceLevelObjective.UserLabelsEntry - 4, // 12: google.monitoring.v3.ServiceLevelIndicator.basic_sli:type_name -> google.monitoring.v3.BasicSli - 6, // 13: google.monitoring.v3.ServiceLevelIndicator.request_based:type_name -> google.monitoring.v3.RequestBasedSli - 9, // 14: google.monitoring.v3.ServiceLevelIndicator.windows_based:type_name -> google.monitoring.v3.WindowsBasedSli - 19, // 15: google.monitoring.v3.BasicSli.availability:type_name -> google.monitoring.v3.BasicSli.AvailabilityCriteria - 20, // 16: google.monitoring.v3.BasicSli.latency:type_name -> google.monitoring.v3.BasicSli.LatencyCriteria - 7, // 17: google.monitoring.v3.RequestBasedSli.good_total_ratio:type_name -> google.monitoring.v3.TimeSeriesRatio - 8, // 18: google.monitoring.v3.RequestBasedSli.distribution_cut:type_name -> google.monitoring.v3.DistributionCut - 5, // 19: google.monitoring.v3.DistributionCut.range:type_name -> google.monitoring.v3.Range - 21, // 20: google.monitoring.v3.WindowsBasedSli.good_total_ratio_threshold:type_name -> google.monitoring.v3.WindowsBasedSli.PerformanceThreshold - 22, // 21: google.monitoring.v3.WindowsBasedSli.metric_mean_in_range:type_name -> 
google.monitoring.v3.WindowsBasedSli.MetricRange - 22, // 22: google.monitoring.v3.WindowsBasedSli.metric_sum_in_range:type_name -> google.monitoring.v3.WindowsBasedSli.MetricRange - 23, // 23: google.monitoring.v3.WindowsBasedSli.window_period:type_name -> google.protobuf.Duration - 23, // 24: google.monitoring.v3.BasicSli.LatencyCriteria.threshold:type_name -> google.protobuf.Duration - 6, // 25: google.monitoring.v3.WindowsBasedSli.PerformanceThreshold.performance:type_name -> google.monitoring.v3.RequestBasedSli - 4, // 26: google.monitoring.v3.WindowsBasedSli.PerformanceThreshold.basic_sli_performance:type_name -> google.monitoring.v3.BasicSli - 5, // 27: google.monitoring.v3.WindowsBasedSli.MetricRange.range:type_name -> google.monitoring.v3.Range - 28, // [28:28] is the sub-list for method output_type - 28, // [28:28] is the sub-list for method input_type - 28, // [28:28] is the sub-list for extension type_name - 28, // [28:28] is the sub-list for extension extendee - 0, // [0:28] is the sub-list for field type_name + 16, // 6: google.monitoring.v3.Service.cloud_run:type_name -> google.monitoring.v3.Service.CloudRun + 17, // 7: google.monitoring.v3.Service.gke_namespace:type_name -> google.monitoring.v3.Service.GkeNamespace + 18, // 8: google.monitoring.v3.Service.gke_workload:type_name -> google.monitoring.v3.Service.GkeWorkload + 19, // 9: google.monitoring.v3.Service.gke_service:type_name -> google.monitoring.v3.Service.GkeService + 20, // 10: google.monitoring.v3.Service.basic_service:type_name -> google.monitoring.v3.Service.BasicService + 21, // 11: google.monitoring.v3.Service.telemetry:type_name -> google.monitoring.v3.Service.Telemetry + 22, // 12: google.monitoring.v3.Service.user_labels:type_name -> google.monitoring.v3.Service.UserLabelsEntry + 3, // 13: google.monitoring.v3.ServiceLevelObjective.service_level_indicator:type_name -> google.monitoring.v3.ServiceLevelIndicator + 29, // 14: google.monitoring.v3.ServiceLevelObjective.rolling_period:type_name -> google.protobuf.Duration + 30, // 15: google.monitoring.v3.ServiceLevelObjective.calendar_period:type_name -> google.type.CalendarPeriod + 24, // 16: google.monitoring.v3.ServiceLevelObjective.user_labels:type_name -> google.monitoring.v3.ServiceLevelObjective.UserLabelsEntry + 4, // 17: google.monitoring.v3.ServiceLevelIndicator.basic_sli:type_name -> google.monitoring.v3.BasicSli + 6, // 18: google.monitoring.v3.ServiceLevelIndicator.request_based:type_name -> google.monitoring.v3.RequestBasedSli + 9, // 19: google.monitoring.v3.ServiceLevelIndicator.windows_based:type_name -> google.monitoring.v3.WindowsBasedSli + 25, // 20: google.monitoring.v3.BasicSli.availability:type_name -> google.monitoring.v3.BasicSli.AvailabilityCriteria + 26, // 21: google.monitoring.v3.BasicSli.latency:type_name -> google.monitoring.v3.BasicSli.LatencyCriteria + 7, // 22: google.monitoring.v3.RequestBasedSli.good_total_ratio:type_name -> google.monitoring.v3.TimeSeriesRatio + 8, // 23: google.monitoring.v3.RequestBasedSli.distribution_cut:type_name -> google.monitoring.v3.DistributionCut + 5, // 24: google.monitoring.v3.DistributionCut.range:type_name -> google.monitoring.v3.Range + 27, // 25: google.monitoring.v3.WindowsBasedSli.good_total_ratio_threshold:type_name -> google.monitoring.v3.WindowsBasedSli.PerformanceThreshold + 28, // 26: google.monitoring.v3.WindowsBasedSli.metric_mean_in_range:type_name -> google.monitoring.v3.WindowsBasedSli.MetricRange + 28, // 27: google.monitoring.v3.WindowsBasedSli.metric_sum_in_range:type_name -> 
google.monitoring.v3.WindowsBasedSli.MetricRange + 29, // 28: google.monitoring.v3.WindowsBasedSli.window_period:type_name -> google.protobuf.Duration + 23, // 29: google.monitoring.v3.Service.BasicService.service_labels:type_name -> google.monitoring.v3.Service.BasicService.ServiceLabelsEntry + 29, // 30: google.monitoring.v3.BasicSli.LatencyCriteria.threshold:type_name -> google.protobuf.Duration + 6, // 31: google.monitoring.v3.WindowsBasedSli.PerformanceThreshold.performance:type_name -> google.monitoring.v3.RequestBasedSli + 4, // 32: google.monitoring.v3.WindowsBasedSli.PerformanceThreshold.basic_sli_performance:type_name -> google.monitoring.v3.BasicSli + 5, // 33: google.monitoring.v3.WindowsBasedSli.MetricRange.range:type_name -> google.monitoring.v3.Range + 34, // [34:34] is the sub-list for method output_type + 34, // [34:34] is the sub-list for method input_type + 34, // [34:34] is the sub-list for extension type_name + 34, // [34:34] is the sub-list for extension extendee + 0, // [0:34] is the sub-list for field type_name } func init() { file_google_monitoring_v3_service_proto_init() } @@ -2178,7 +2745,7 @@ func file_google_monitoring_v3_service_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Service); i { case 0: return &v.state @@ -2190,7 +2757,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ServiceLevelObjective); i { case 0: return &v.state @@ -2202,7 +2769,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ServiceLevelIndicator); i { case 0: return &v.state @@ -2214,7 +2781,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*BasicSli); i { case 0: return &v.state @@ -2226,7 +2793,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*Range); i { case 0: return &v.state @@ -2238,7 +2805,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*RequestBasedSli); i { case 0: return &v.state @@ -2250,7 +2817,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*TimeSeriesRatio); i { case 0: 
return &v.state @@ -2262,7 +2829,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*DistributionCut); i { case 0: return &v.state @@ -2274,7 +2841,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*WindowsBasedSli); i { case 0: return &v.state @@ -2286,7 +2853,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*Service_Custom); i { case 0: return &v.state @@ -2298,7 +2865,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*Service_AppEngine); i { case 0: return &v.state @@ -2310,7 +2877,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*Service_CloudEndpoints); i { case 0: return &v.state @@ -2322,7 +2889,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*Service_ClusterIstio); i { case 0: return &v.state @@ -2334,7 +2901,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*Service_MeshIstio); i { case 0: return &v.state @@ -2346,7 +2913,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[14].Exporter = func(v any, i int) any { switch v := v.(*Service_IstioCanonicalService); i { case 0: return &v.state @@ -2358,7 +2925,67 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[15].Exporter = func(v any, i int) any { + switch v := v.(*Service_CloudRun); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[16].Exporter = func(v any, i int) any { + switch v := v.(*Service_GkeNamespace); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + 
return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[17].Exporter = func(v any, i int) any { + switch v := v.(*Service_GkeWorkload); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[18].Exporter = func(v any, i int) any { + switch v := v.(*Service_GkeService); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[19].Exporter = func(v any, i int) any { + switch v := v.(*Service_BasicService); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[20].Exporter = func(v any, i int) any { switch v := v.(*Service_Telemetry); i { case 0: return &v.state @@ -2370,7 +2997,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[24].Exporter = func(v any, i int) any { switch v := v.(*BasicSli_AvailabilityCriteria); i { case 0: return &v.state @@ -2382,7 +3009,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[25].Exporter = func(v any, i int) any { switch v := v.(*BasicSli_LatencyCriteria); i { case 0: return &v.state @@ -2394,7 +3021,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[26].Exporter = func(v any, i int) any { switch v := v.(*WindowsBasedSli_PerformanceThreshold); i { case 0: return &v.state @@ -2406,7 +3033,7 @@ func file_google_monitoring_v3_service_proto_init() { return nil } } - file_google_monitoring_v3_service_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_proto_msgTypes[27].Exporter = func(v any, i int) any { switch v := v.(*WindowsBasedSli_MetricRange); i { case 0: return &v.state @@ -2419,38 +3046,42 @@ func file_google_monitoring_v3_service_proto_init() { } } } - file_google_monitoring_v3_service_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_google_monitoring_v3_service_proto_msgTypes[0].OneofWrappers = []any{ (*Service_Custom_)(nil), (*Service_AppEngine_)(nil), (*Service_CloudEndpoints_)(nil), (*Service_ClusterIstio_)(nil), (*Service_MeshIstio_)(nil), (*Service_IstioCanonicalService_)(nil), + (*Service_CloudRun_)(nil), + (*Service_GkeNamespace_)(nil), + (*Service_GkeWorkload_)(nil), + (*Service_GkeService_)(nil), } - file_google_monitoring_v3_service_proto_msgTypes[1].OneofWrappers = []interface{}{ + file_google_monitoring_v3_service_proto_msgTypes[1].OneofWrappers = []any{ (*ServiceLevelObjective_RollingPeriod)(nil), (*ServiceLevelObjective_CalendarPeriod)(nil), } - file_google_monitoring_v3_service_proto_msgTypes[2].OneofWrappers = []interface{}{ + file_google_monitoring_v3_service_proto_msgTypes[2].OneofWrappers = []any{ (*ServiceLevelIndicator_BasicSli)(nil), (*ServiceLevelIndicator_RequestBased)(nil), (*ServiceLevelIndicator_WindowsBased)(nil), } - 
file_google_monitoring_v3_service_proto_msgTypes[3].OneofWrappers = []interface{}{ + file_google_monitoring_v3_service_proto_msgTypes[3].OneofWrappers = []any{ (*BasicSli_Availability)(nil), (*BasicSli_Latency)(nil), } - file_google_monitoring_v3_service_proto_msgTypes[5].OneofWrappers = []interface{}{ + file_google_monitoring_v3_service_proto_msgTypes[5].OneofWrappers = []any{ (*RequestBasedSli_GoodTotalRatio)(nil), (*RequestBasedSli_DistributionCut)(nil), } - file_google_monitoring_v3_service_proto_msgTypes[8].OneofWrappers = []interface{}{ + file_google_monitoring_v3_service_proto_msgTypes[8].OneofWrappers = []any{ (*WindowsBasedSli_GoodBadMetricFilter)(nil), (*WindowsBasedSli_GoodTotalRatioThreshold)(nil), (*WindowsBasedSli_MetricMeanInRange)(nil), (*WindowsBasedSli_MetricSumInRange)(nil), } - file_google_monitoring_v3_service_proto_msgTypes[20].OneofWrappers = []interface{}{ + file_google_monitoring_v3_service_proto_msgTypes[26].OneofWrappers = []any{ (*WindowsBasedSli_PerformanceThreshold_Performance)(nil), (*WindowsBasedSli_PerformanceThreshold_BasicSliPerformance)(nil), } @@ -2460,7 +3091,7 @@ func file_google_monitoring_v3_service_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_monitoring_v3_service_proto_rawDesc, NumEnums: 1, - NumMessages: 22, + NumMessages: 28, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go index 18e0c6a6429..15e1f04d6a5 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/service_service.proto package monitoringpb @@ -1142,7 +1142,7 @@ func file_google_monitoring_v3_service_service_proto_rawDescGZIP() []byte { } var file_google_monitoring_v3_service_service_proto_msgTypes = make([]protoimpl.MessageInfo, 12) -var file_google_monitoring_v3_service_service_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_service_service_proto_goTypes = []any{ (*CreateServiceRequest)(nil), // 0: google.monitoring.v3.CreateServiceRequest (*GetServiceRequest)(nil), // 1: google.monitoring.v3.GetServiceRequest (*ListServicesRequest)(nil), // 2: google.monitoring.v3.ListServicesRequest @@ -1206,7 +1206,7 @@ func file_google_monitoring_v3_service_service_proto_init() { } file_google_monitoring_v3_service_proto_init() if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_service_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_service_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*CreateServiceRequest); i { case 0: return &v.state @@ -1218,7 +1218,7 @@ func file_google_monitoring_v3_service_service_proto_init() { return nil } } - file_google_monitoring_v3_service_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_service_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*GetServiceRequest); i { case 0: return &v.state @@ -1230,7 +1230,7 @@ func file_google_monitoring_v3_service_service_proto_init() { return nil } } - file_google_monitoring_v3_service_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_service_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ListServicesRequest); i { case 0: return &v.state @@ -1242,7 +1242,7 @@ func file_google_monitoring_v3_service_service_proto_init() { return nil } } - file_google_monitoring_v3_service_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_service_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*ListServicesResponse); i { case 0: return &v.state @@ -1254,7 +1254,7 @@ func file_google_monitoring_v3_service_service_proto_init() { return nil } } - file_google_monitoring_v3_service_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_service_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*UpdateServiceRequest); i { case 0: return &v.state @@ -1266,7 +1266,7 @@ func file_google_monitoring_v3_service_service_proto_init() { return nil } } - file_google_monitoring_v3_service_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_service_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*DeleteServiceRequest); i { case 0: return &v.state @@ -1278,7 +1278,7 @@ func file_google_monitoring_v3_service_service_proto_init() { return nil } } - file_google_monitoring_v3_service_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_service_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*CreateServiceLevelObjectiveRequest); i { case 0: return &v.state @@ -1290,7 +1290,7 @@ func file_google_monitoring_v3_service_service_proto_init() { 
return nil } } - file_google_monitoring_v3_service_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_service_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*GetServiceLevelObjectiveRequest); i { case 0: return &v.state @@ -1302,7 +1302,7 @@ func file_google_monitoring_v3_service_service_proto_init() { return nil } } - file_google_monitoring_v3_service_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_service_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*ListServiceLevelObjectivesRequest); i { case 0: return &v.state @@ -1314,7 +1314,7 @@ func file_google_monitoring_v3_service_service_proto_init() { return nil } } - file_google_monitoring_v3_service_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_service_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*ListServiceLevelObjectivesResponse); i { case 0: return &v.state @@ -1326,7 +1326,7 @@ func file_google_monitoring_v3_service_service_proto_init() { return nil } } - file_google_monitoring_v3_service_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_service_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*UpdateServiceLevelObjectiveRequest); i { case 0: return &v.state @@ -1338,7 +1338,7 @@ func file_google_monitoring_v3_service_service_proto_init() { return nil } } - file_google_monitoring_v3_service_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_service_service_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*DeleteServiceLevelObjectiveRequest); i { case 0: return &v.state diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go index f27c9bbe2cc..ab49868045d 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/snooze.proto package monitoringpb @@ -247,7 +247,7 @@ func file_google_monitoring_v3_snooze_proto_rawDescGZIP() []byte { } var file_google_monitoring_v3_snooze_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_google_monitoring_v3_snooze_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_snooze_proto_goTypes = []any{ (*Snooze)(nil), // 0: google.monitoring.v3.Snooze (*Snooze_Criteria)(nil), // 1: google.monitoring.v3.Snooze.Criteria (*TimeInterval)(nil), // 2: google.monitoring.v3.TimeInterval @@ -269,7 +269,7 @@ func file_google_monitoring_v3_snooze_proto_init() { } file_google_monitoring_v3_common_proto_init() if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_snooze_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_snooze_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Snooze); i { case 0: return &v.state @@ -281,7 +281,7 @@ func file_google_monitoring_v3_snooze_proto_init() { return nil } } - file_google_monitoring_v3_snooze_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_snooze_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*Snooze_Criteria); i { case 0: return &v.state diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go index caa89ac32a4..39388a99828 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/snooze_service.proto package monitoringpb @@ -545,7 +545,7 @@ func file_google_monitoring_v3_snooze_service_proto_rawDescGZIP() []byte { } var file_google_monitoring_v3_snooze_service_proto_msgTypes = make([]protoimpl.MessageInfo, 5) -var file_google_monitoring_v3_snooze_service_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_snooze_service_proto_goTypes = []any{ (*CreateSnoozeRequest)(nil), // 0: google.monitoring.v3.CreateSnoozeRequest (*ListSnoozesRequest)(nil), // 1: google.monitoring.v3.ListSnoozesRequest (*ListSnoozesResponse)(nil), // 2: google.monitoring.v3.ListSnoozesResponse @@ -581,7 +581,7 @@ func file_google_monitoring_v3_snooze_service_proto_init() { } file_google_monitoring_v3_snooze_proto_init() if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_snooze_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_snooze_service_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*CreateSnoozeRequest); i { case 0: return &v.state @@ -593,7 +593,7 @@ func file_google_monitoring_v3_snooze_service_proto_init() { return nil } } - file_google_monitoring_v3_snooze_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_snooze_service_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ListSnoozesRequest); i { case 0: return &v.state @@ -605,7 +605,7 @@ func file_google_monitoring_v3_snooze_service_proto_init() { return nil } } - file_google_monitoring_v3_snooze_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_snooze_service_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ListSnoozesResponse); i { case 0: return &v.state @@ -617,7 +617,7 @@ func file_google_monitoring_v3_snooze_service_proto_init() { return nil } } - file_google_monitoring_v3_snooze_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_snooze_service_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*GetSnoozeRequest); i { case 0: return &v.state @@ -629,7 +629,7 @@ func file_google_monitoring_v3_snooze_service_proto_init() { return nil } } - file_google_monitoring_v3_snooze_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_snooze_service_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*UpdateSnoozeRequest); i { case 0: return &v.state diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go index 7b7b7533cef..5a55ecc6650 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/span_context.proto package monitoringpb @@ -137,7 +137,7 @@ func file_google_monitoring_v3_span_context_proto_rawDescGZIP() []byte { } var file_google_monitoring_v3_span_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_monitoring_v3_span_context_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_span_context_proto_goTypes = []any{ (*SpanContext)(nil), // 0: google.monitoring.v3.SpanContext } var file_google_monitoring_v3_span_context_proto_depIdxs = []int32{ @@ -154,7 +154,7 @@ func file_google_monitoring_v3_span_context_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_span_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_span_context_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*SpanContext); i { case 0: return &v.state diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go index 69124603ba7..e0b9e4a385a 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/uptime.proto package monitoringpb @@ -293,7 +293,7 @@ func (x UptimeCheckConfig_CheckerType) Number() protoreflect.EnumNumber { // Deprecated: Use UptimeCheckConfig_CheckerType.Descriptor instead. func (UptimeCheckConfig_CheckerType) EnumDescriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 0} + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 0} } // The HTTP request method options. @@ -346,7 +346,7 @@ func (x UptimeCheckConfig_HttpCheck_RequestMethod) Number() protoreflect.EnumNum // Deprecated: Use UptimeCheckConfig_HttpCheck_RequestMethod.Descriptor instead. func (UptimeCheckConfig_HttpCheck_RequestMethod) EnumDescriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 2, 0} + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 0} } // Header options corresponding to the content type of a HTTP request body. @@ -402,7 +402,7 @@ func (x UptimeCheckConfig_HttpCheck_ContentType) Number() protoreflect.EnumNumbe // Deprecated: Use UptimeCheckConfig_HttpCheck_ContentType.Descriptor instead. func (UptimeCheckConfig_HttpCheck_ContentType) EnumDescriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 2, 1} + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 1} } // An HTTP status code class. @@ -471,7 +471,56 @@ func (x UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) Number() pro // Deprecated: Use UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass.Descriptor instead. 
func (UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) EnumDescriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 2, 1, 0} + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 1, 0} +} + +// Type of authentication. +type UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType int32 + +const ( + // Default value, will result in OIDC Authentication. + UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_SERVICE_AGENT_AUTHENTICATION_TYPE_UNSPECIFIED UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType = 0 + // OIDC Authentication + UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_OIDC_TOKEN UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType = 1 +) + +// Enum value maps for UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType. +var ( + UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType_name = map[int32]string{ + 0: "SERVICE_AGENT_AUTHENTICATION_TYPE_UNSPECIFIED", + 1: "OIDC_TOKEN", + } + UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType_value = map[string]int32{ + "SERVICE_AGENT_AUTHENTICATION_TYPE_UNSPECIFIED": 0, + "OIDC_TOKEN": 1, + } +) + +func (x UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) Enum() *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType { + p := new(UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) + *p = x + return p +} + +func (x UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_uptime_proto_enumTypes[7].Descriptor() +} + +func (UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_uptime_proto_enumTypes[7] +} + +func (x UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType.Descriptor instead. +func (UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 2, 0} } // Options to perform content matching. 
@@ -542,11 +591,11 @@ func (x UptimeCheckConfig_ContentMatcher_ContentMatcherOption) String() string { } func (UptimeCheckConfig_ContentMatcher_ContentMatcherOption) Descriptor() protoreflect.EnumDescriptor { - return file_google_monitoring_v3_uptime_proto_enumTypes[7].Descriptor() + return file_google_monitoring_v3_uptime_proto_enumTypes[8].Descriptor() } func (UptimeCheckConfig_ContentMatcher_ContentMatcherOption) Type() protoreflect.EnumType { - return &file_google_monitoring_v3_uptime_proto_enumTypes[7] + return &file_google_monitoring_v3_uptime_proto_enumTypes[8] } func (x UptimeCheckConfig_ContentMatcher_ContentMatcherOption) Number() protoreflect.EnumNumber { @@ -555,7 +604,7 @@ func (x UptimeCheckConfig_ContentMatcher_ContentMatcherOption) Number() protoref // Deprecated: Use UptimeCheckConfig_ContentMatcher_ContentMatcherOption.Descriptor instead. func (UptimeCheckConfig_ContentMatcher_ContentMatcherOption) EnumDescriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 4, 0} + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 4, 0} } // Options to perform JSONPath content matching. @@ -599,11 +648,11 @@ func (x UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) } func (UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) Descriptor() protoreflect.EnumDescriptor { - return file_google_monitoring_v3_uptime_proto_enumTypes[8].Descriptor() + return file_google_monitoring_v3_uptime_proto_enumTypes[9].Descriptor() } func (UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) Type() protoreflect.EnumType { - return &file_google_monitoring_v3_uptime_proto_enumTypes[8] + return &file_google_monitoring_v3_uptime_proto_enumTypes[9] } func (x UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) Number() protoreflect.EnumNumber { @@ -612,7 +661,7 @@ func (x UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) // Deprecated: Use UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption.Descriptor instead. func (UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) EnumDescriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 4, 0, 0} + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 4, 0, 0} } // An internal checker allows Uptime checks to run on private/internal GCP @@ -722,6 +771,77 @@ func (x *InternalChecker) GetState() InternalChecker_State { return InternalChecker_UNSPECIFIED } +// Describes a Synthetic Monitor to be invoked by Uptime. +type SyntheticMonitorTarget struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies a Synthetic Monitor's execution stack. 
+ // + // Types that are assignable to Target: + // + // *SyntheticMonitorTarget_CloudFunctionV2 + Target isSyntheticMonitorTarget_Target `protobuf_oneof:"target"` +} + +func (x *SyntheticMonitorTarget) Reset() { + *x = SyntheticMonitorTarget{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SyntheticMonitorTarget) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyntheticMonitorTarget) ProtoMessage() {} + +func (x *SyntheticMonitorTarget) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyntheticMonitorTarget.ProtoReflect.Descriptor instead. +func (*SyntheticMonitorTarget) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1} +} + +func (m *SyntheticMonitorTarget) GetTarget() isSyntheticMonitorTarget_Target { + if m != nil { + return m.Target + } + return nil +} + +func (x *SyntheticMonitorTarget) GetCloudFunctionV2() *SyntheticMonitorTarget_CloudFunctionV2Target { + if x, ok := x.GetTarget().(*SyntheticMonitorTarget_CloudFunctionV2); ok { + return x.CloudFunctionV2 + } + return nil +} + +type isSyntheticMonitorTarget_Target interface { + isSyntheticMonitorTarget_Target() +} + +type SyntheticMonitorTarget_CloudFunctionV2 struct { + // Target a Synthetic Monitor GCFv2 instance. + CloudFunctionV2 *SyntheticMonitorTarget_CloudFunctionV2Target `protobuf:"bytes,1,opt,name=cloud_function_v2,json=cloudFunctionV2,proto3,oneof"` +} + +func (*SyntheticMonitorTarget_CloudFunctionV2) isSyntheticMonitorTarget_Target() {} + // This message configures which resources and services to monitor for // availability. type UptimeCheckConfig struct { @@ -729,7 +849,8 @@ type UptimeCheckConfig struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // A unique resource name for this Uptime check configuration. The format is: + // Identifier. A unique resource name for this Uptime check configuration. The + // format is: // // projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] // @@ -750,6 +871,7 @@ type UptimeCheckConfig struct { // // *UptimeCheckConfig_MonitoredResource // *UptimeCheckConfig_ResourceGroup_ + // *UptimeCheckConfig_SyntheticMonitor Resource isUptimeCheckConfig_Resource `protobuf_oneof:"resource"` // The type of Uptime check request. 
// @@ -807,7 +929,7 @@ type UptimeCheckConfig struct { func (x *UptimeCheckConfig) Reset() { *x = UptimeCheckConfig{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[1] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -820,7 +942,7 @@ func (x *UptimeCheckConfig) String() string { func (*UptimeCheckConfig) ProtoMessage() {} func (x *UptimeCheckConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[1] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -833,7 +955,7 @@ func (x *UptimeCheckConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use UptimeCheckConfig.ProtoReflect.Descriptor instead. func (*UptimeCheckConfig) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1} + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2} } func (x *UptimeCheckConfig) GetName() string { @@ -871,6 +993,13 @@ func (x *UptimeCheckConfig) GetResourceGroup() *UptimeCheckConfig_ResourceGroup return nil } +func (x *UptimeCheckConfig) GetSyntheticMonitor() *SyntheticMonitorTarget { + if x, ok := x.GetResource().(*UptimeCheckConfig_SyntheticMonitor); ok { + return x.SyntheticMonitor + } + return nil +} + func (m *UptimeCheckConfig) GetCheckRequestType() isUptimeCheckConfig_CheckRequestType { if m != nil { return m.CheckRequestType @@ -976,10 +1105,17 @@ type UptimeCheckConfig_ResourceGroup_ struct { ResourceGroup *UptimeCheckConfig_ResourceGroup `protobuf:"bytes,4,opt,name=resource_group,json=resourceGroup,proto3,oneof"` } +type UptimeCheckConfig_SyntheticMonitor struct { + // Specifies a Synthetic Monitor to invoke. + SyntheticMonitor *SyntheticMonitorTarget `protobuf:"bytes,21,opt,name=synthetic_monitor,json=syntheticMonitor,proto3,oneof"` +} + func (*UptimeCheckConfig_MonitoredResource) isUptimeCheckConfig_Resource() {} func (*UptimeCheckConfig_ResourceGroup_) isUptimeCheckConfig_Resource() {} +func (*UptimeCheckConfig_SyntheticMonitor) isUptimeCheckConfig_Resource() {} + type isUptimeCheckConfig_CheckRequestType interface { isUptimeCheckConfig_CheckRequestType() } @@ -1022,7 +1158,7 @@ type UptimeCheckIp struct { func (x *UptimeCheckIp) Reset() { *x = UptimeCheckIp{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[2] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1035,7 +1171,7 @@ func (x *UptimeCheckIp) String() string { func (*UptimeCheckIp) ProtoMessage() {} func (x *UptimeCheckIp) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[2] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1048,7 +1184,7 @@ func (x *UptimeCheckIp) ProtoReflect() protoreflect.Message { // Deprecated: Use UptimeCheckIp.ProtoReflect.Descriptor instead. 
func (*UptimeCheckIp) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2} + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{3} } func (x *UptimeCheckIp) GetRegion() UptimeCheckRegion { @@ -1072,6 +1208,69 @@ func (x *UptimeCheckIp) GetIpAddress() string { return "" } +// A Synthetic Monitor deployed to a Cloud Functions V2 instance. +type SyntheticMonitorTarget_CloudFunctionV2Target struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Fully qualified GCFv2 resource name + // i.e. `projects/{project}/locations/{location}/functions/{function}` + // Required. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Output only. The `cloud_run_revision` Monitored Resource associated with + // the GCFv2. The Synthetic Monitor execution results (metrics, logs, and + // spans) are reported against this Monitored Resource. This field is output + // only. + CloudRunRevision *monitoredres.MonitoredResource `protobuf:"bytes,2,opt,name=cloud_run_revision,json=cloudRunRevision,proto3" json:"cloud_run_revision,omitempty"` +} + +func (x *SyntheticMonitorTarget_CloudFunctionV2Target) Reset() { + *x = SyntheticMonitorTarget_CloudFunctionV2Target{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SyntheticMonitorTarget_CloudFunctionV2Target) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyntheticMonitorTarget_CloudFunctionV2Target) ProtoMessage() {} + +func (x *SyntheticMonitorTarget_CloudFunctionV2Target) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyntheticMonitorTarget_CloudFunctionV2Target.ProtoReflect.Descriptor instead. +func (*SyntheticMonitorTarget_CloudFunctionV2Target) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *SyntheticMonitorTarget_CloudFunctionV2Target) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *SyntheticMonitorTarget_CloudFunctionV2Target) GetCloudRunRevision() *monitoredres.MonitoredResource { + if x != nil { + return x.CloudRunRevision + } + return nil +} + // The resource submessage for group checks. It can be used instead of a // monitored resource, when multiple resources are being monitored. 
type UptimeCheckConfig_ResourceGroup struct { @@ -1090,7 +1289,7 @@ type UptimeCheckConfig_ResourceGroup struct { func (x *UptimeCheckConfig_ResourceGroup) Reset() { *x = UptimeCheckConfig_ResourceGroup{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[3] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1103,7 +1302,7 @@ func (x *UptimeCheckConfig_ResourceGroup) String() string { func (*UptimeCheckConfig_ResourceGroup) ProtoMessage() {} func (x *UptimeCheckConfig_ResourceGroup) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[3] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1116,7 +1315,7 @@ func (x *UptimeCheckConfig_ResourceGroup) ProtoReflect() protoreflect.Message { // Deprecated: Use UptimeCheckConfig_ResourceGroup.ProtoReflect.Descriptor instead. func (*UptimeCheckConfig_ResourceGroup) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 0} + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 0} } func (x *UptimeCheckConfig_ResourceGroup) GetGroupId() string { @@ -1148,7 +1347,7 @@ type UptimeCheckConfig_PingConfig struct { func (x *UptimeCheckConfig_PingConfig) Reset() { *x = UptimeCheckConfig_PingConfig{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[4] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1161,7 +1360,7 @@ func (x *UptimeCheckConfig_PingConfig) String() string { func (*UptimeCheckConfig_PingConfig) ProtoMessage() {} func (x *UptimeCheckConfig_PingConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[4] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1174,7 +1373,7 @@ func (x *UptimeCheckConfig_PingConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use UptimeCheckConfig_PingConfig.ProtoReflect.Descriptor instead. func (*UptimeCheckConfig_PingConfig) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 1} + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 1} } func (x *UptimeCheckConfig_PingConfig) GetPingsCount() int32 { @@ -1208,6 +1407,7 @@ type UptimeCheckConfig_HttpCheck struct { Port int32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` // The authentication information. Optional when creating an HTTP check; // defaults to empty. + // Do not set both `auth_method` and `auth_info`. AuthInfo *UptimeCheckConfig_HttpCheck_BasicAuthentication `protobuf:"bytes,4,opt,name=auth_info,json=authInfo,proto3" json:"auth_info,omitempty"` // Boolean specifying whether to encrypt the header information. 
// Encryption should be specified for any headers related to authentication @@ -1262,12 +1462,20 @@ type UptimeCheckConfig_HttpCheck struct { AcceptedResponseStatusCodes []*UptimeCheckConfig_HttpCheck_ResponseStatusCode `protobuf:"bytes,11,rep,name=accepted_response_status_codes,json=acceptedResponseStatusCodes,proto3" json:"accepted_response_status_codes,omitempty"` // Contains information needed to add pings to an HTTP check. PingConfig *UptimeCheckConfig_PingConfig `protobuf:"bytes,12,opt,name=ping_config,json=pingConfig,proto3" json:"ping_config,omitempty"` + // This field is optional and should be set only by users interested in + // an authenticated uptime check. + // Do not set both `auth_method` and `auth_info`. + // + // Types that are assignable to AuthMethod: + // + // *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ + AuthMethod isUptimeCheckConfig_HttpCheck_AuthMethod `protobuf_oneof:"auth_method"` } func (x *UptimeCheckConfig_HttpCheck) Reset() { *x = UptimeCheckConfig_HttpCheck{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[5] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1280,7 +1488,7 @@ func (x *UptimeCheckConfig_HttpCheck) String() string { func (*UptimeCheckConfig_HttpCheck) ProtoMessage() {} func (x *UptimeCheckConfig_HttpCheck) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[5] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1293,7 +1501,7 @@ func (x *UptimeCheckConfig_HttpCheck) ProtoReflect() protoreflect.Message { // Deprecated: Use UptimeCheckConfig_HttpCheck.ProtoReflect.Descriptor instead. func (*UptimeCheckConfig_HttpCheck) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 2} + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2} } func (x *UptimeCheckConfig_HttpCheck) GetRequestMethod() UptimeCheckConfig_HttpCheck_RequestMethod { @@ -1387,6 +1595,34 @@ func (x *UptimeCheckConfig_HttpCheck) GetPingConfig() *UptimeCheckConfig_PingCon return nil } +func (m *UptimeCheckConfig_HttpCheck) GetAuthMethod() isUptimeCheckConfig_HttpCheck_AuthMethod { + if m != nil { + return m.AuthMethod + } + return nil +} + +func (x *UptimeCheckConfig_HttpCheck) GetServiceAgentAuthentication() *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication { + if x, ok := x.GetAuthMethod().(*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_); ok { + return x.ServiceAgentAuthentication + } + return nil +} + +type isUptimeCheckConfig_HttpCheck_AuthMethod interface { + isUptimeCheckConfig_HttpCheck_AuthMethod() +} + +type UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ struct { + // If specified, Uptime will generate and attach an OIDC JWT token for the + // Monitoring service agent service account as an `Authorization` header + // in the HTTP request when probing. + ServiceAgentAuthentication *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication `protobuf:"bytes,14,opt,name=service_agent_authentication,json=serviceAgentAuthentication,proto3,oneof"` +} + +func (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_) isUptimeCheckConfig_HttpCheck_AuthMethod() { +} + // Information required for a TCP Uptime check request. 
type UptimeCheckConfig_TcpCheck struct { state protoimpl.MessageState @@ -1404,7 +1640,7 @@ type UptimeCheckConfig_TcpCheck struct { func (x *UptimeCheckConfig_TcpCheck) Reset() { *x = UptimeCheckConfig_TcpCheck{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[6] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1417,7 +1653,7 @@ func (x *UptimeCheckConfig_TcpCheck) String() string { func (*UptimeCheckConfig_TcpCheck) ProtoMessage() {} func (x *UptimeCheckConfig_TcpCheck) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[6] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1430,7 +1666,7 @@ func (x *UptimeCheckConfig_TcpCheck) ProtoReflect() protoreflect.Message { // Deprecated: Use UptimeCheckConfig_TcpCheck.ProtoReflect.Descriptor instead. func (*UptimeCheckConfig_TcpCheck) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 3} + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 3} } func (x *UptimeCheckConfig_TcpCheck) GetPort() int32 { @@ -1476,7 +1712,7 @@ type UptimeCheckConfig_ContentMatcher struct { func (x *UptimeCheckConfig_ContentMatcher) Reset() { *x = UptimeCheckConfig_ContentMatcher{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[7] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1489,7 +1725,7 @@ func (x *UptimeCheckConfig_ContentMatcher) String() string { func (*UptimeCheckConfig_ContentMatcher) ProtoMessage() {} func (x *UptimeCheckConfig_ContentMatcher) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[7] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1502,7 +1738,7 @@ func (x *UptimeCheckConfig_ContentMatcher) ProtoReflect() protoreflect.Message { // Deprecated: Use UptimeCheckConfig_ContentMatcher.ProtoReflect.Descriptor instead. 
func (*UptimeCheckConfig_ContentMatcher) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 4} + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 4} } func (x *UptimeCheckConfig_ContentMatcher) GetContent() string { @@ -1563,7 +1799,7 @@ type UptimeCheckConfig_HttpCheck_BasicAuthentication struct { func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) Reset() { *x = UptimeCheckConfig_HttpCheck_BasicAuthentication{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[9] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1576,7 +1812,7 @@ func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) String() string { func (*UptimeCheckConfig_HttpCheck_BasicAuthentication) ProtoMessage() {} func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[9] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1589,7 +1825,7 @@ func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) ProtoReflect() protore // Deprecated: Use UptimeCheckConfig_HttpCheck_BasicAuthentication.ProtoReflect.Descriptor instead. func (*UptimeCheckConfig_HttpCheck_BasicAuthentication) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 2, 0} + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 0} } func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) GetUsername() string { @@ -1625,7 +1861,7 @@ type UptimeCheckConfig_HttpCheck_ResponseStatusCode struct { func (x *UptimeCheckConfig_HttpCheck_ResponseStatusCode) Reset() { *x = UptimeCheckConfig_HttpCheck_ResponseStatusCode{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[10] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1638,7 +1874,7 @@ func (x *UptimeCheckConfig_HttpCheck_ResponseStatusCode) String() string { func (*UptimeCheckConfig_HttpCheck_ResponseStatusCode) ProtoMessage() {} func (x *UptimeCheckConfig_HttpCheck_ResponseStatusCode) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[10] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1651,7 +1887,7 @@ func (x *UptimeCheckConfig_HttpCheck_ResponseStatusCode) ProtoReflect() protoref // Deprecated: Use UptimeCheckConfig_HttpCheck_ResponseStatusCode.ProtoReflect.Descriptor instead. 
func (*UptimeCheckConfig_HttpCheck_ResponseStatusCode) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 2, 1} + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 1} } func (m *UptimeCheckConfig_HttpCheck_ResponseStatusCode) GetStatusCode() isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode { @@ -1695,6 +1931,59 @@ func (*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusValue) isUptimeCheck func (*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_) isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode() { } +// Contains information needed for generating an +// [OpenID Connect +// token](https://developers.google.com/identity/protocols/OpenIDConnect). +// The OIDC token will be generated for the Monitoring service agent service +// account. +type UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Type of authentication. + Type UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType `protobuf:"varint,1,opt,name=type,proto3,enum=google.monitoring.v3.UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType" json:"type,omitempty"` +} + +func (x *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) Reset() { + *x = UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) ProtoMessage() {} + +func (x *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication.ProtoReflect.Descriptor instead. +func (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 2} +} + +func (x *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) GetType() UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType { + if x != nil { + return x.Type + } + return UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_SERVICE_AGENT_AUTHENTICATION_TYPE_UNSPECIFIED +} + // Information needed to perform a JSONPath content match. // Used for `ContentMatcherOption::MATCHES_JSON_PATH` and // `ContentMatcherOption::NOT_MATCHES_JSON_PATH`. 
@@ -1714,7 +2003,7 @@ type UptimeCheckConfig_ContentMatcher_JsonPathMatcher struct { func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) Reset() { *x = UptimeCheckConfig_ContentMatcher_JsonPathMatcher{} if protoimpl.UnsafeEnabled { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[12] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1727,7 +2016,7 @@ func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) String() string { func (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher) ProtoMessage() {} func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) ProtoReflect() protoreflect.Message { - mi := &file_google_monitoring_v3_uptime_proto_msgTypes[12] + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1740,7 +2029,7 @@ func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) ProtoReflect() protor // Deprecated: Use UptimeCheckConfig_ContentMatcher_JsonPathMatcher.ProtoReflect.Descriptor instead. func (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher) Descriptor() ([]byte, []int) { - return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 4, 0} + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 4, 0} } func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) GetJsonPath() string { @@ -1789,174 +2078,227 @@ var file_google_monitoring_v3_uptime_proto_rawDesc = []byte{ 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0x33, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0b, 0x0a, - 0x07, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x3a, 0x02, 0x18, 0x01, 0x22, 0x8b, - 0x1f, 0x0a, 0x11, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, - 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x12, 0x6d, - 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, 0x11, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x5e, 0x0a, 0x0e, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, - 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x52, 0x0a, 0x0a, 0x68, - 0x74, 0x74, 
0x70, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x07, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x3a, 0x02, 0x18, 0x01, 0x22, 0xc4, + 0x02, 0x0a, 0x16, 0x53, 0x79, 0x6e, 0x74, 0x68, 0x65, 0x74, 0x69, 0x63, 0x4d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x70, 0x0a, 0x11, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x5f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x32, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x79, 0x6e, 0x74, + 0x68, 0x65, 0x74, 0x69, 0x63, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x54, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x56, 0x32, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x48, 0x00, 0x52, 0x0f, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x32, 0x1a, 0xad, 0x01, 0x0a, 0x15, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x32, 0x54, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x42, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x2e, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x28, 0x0a, 0x26, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x46, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x50, 0x0a, 0x12, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x10, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x52, 0x75, 0x6e, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0x0a, 0x06, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x22, 0x94, 0x23, 0x0a, 0x11, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x08, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, + 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x12, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x48, 0x00, 0x52, 0x11, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x5e, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 
0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x48, 0x01, 0x52, 0x09, 0x68, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, - 0x4f, 0x0a, 0x09, 0x74, 0x63, 0x70, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x5b, 0x0a, 0x11, 0x73, 0x79, 0x6e, 0x74, 0x68, + 0x65, 0x74, 0x69, 0x63, 0x5f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x18, 0x15, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x79, 0x6e, 0x74, 0x68, 0x65, + 0x74, 0x69, 0x63, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x48, 0x00, 0x52, 0x10, 0x73, 0x79, 0x6e, 0x74, 0x68, 0x65, 0x74, 0x69, 0x63, 0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x12, 0x52, 0x0a, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x68, 0x65, + 0x63, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x01, 0x52, 0x09, 0x68, + 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x4f, 0x0a, 0x09, 0x74, 0x63, 0x70, 0x5f, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x63, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x01, 0x52, + 0x08, 0x74, 0x63, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x31, 0x0a, 0x06, 0x70, 0x65, 0x72, + 0x69, 0x6f, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x33, 0x0a, 0x07, + 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x12, 0x61, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x73, 0x12, 0x56, 0x0a, 0x0c, 0x63, 0x68, 
0x65, 0x63, 0x6b, 0x65, 0x72, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x0b, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x52, 0x0a, 0x10, + 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, + 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x52, + 0x0f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x23, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x18, + 0x0f, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x69, 0x73, 0x49, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x12, 0x56, 0x0a, 0x11, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x42, 0x02, 0x18, 0x01, 0x52, 0x10, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x73, 0x12, 0x58, 0x0a, + 0x0b, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x14, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x63, 0x70, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x48, 0x01, 0x52, 0x08, 0x74, 0x63, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x12, 0x31, 0x0a, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x65, 0x72, - 0x69, 0x6f, 0x64, 0x12, 0x33, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x61, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, - 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x18, 0x09, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x55, 0x73, 0x65, 0x72, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75, 0x73, 0x65, + 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x78, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x72, 
0x6f, 0x75, + 0x70, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x67, 0x72, 0x6f, 0x75, + 0x70, 0x49, 0x64, 0x12, 0x4c, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, + 0x65, 0x1a, 0x2d, 0x0a, 0x0a, 0x50, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x1f, 0x0a, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x1a, 0xef, 0x0e, 0x0a, 0x09, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x66, + 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, + 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x5f, 0x73, 0x73, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x75, 0x73, 0x65, 0x53, 0x73, 0x6c, 0x12, + 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x62, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, 0x5f, + 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x42, 0x61, + 0x73, 0x69, 0x63, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x08, 0x61, 0x75, 0x74, 0x68, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x21, 0x0a, 0x0c, 0x6d, + 0x61, 0x73, 0x6b, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0b, 0x6d, 0x61, 0x73, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x58, + 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x60, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3d, + 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x63, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x43, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x73, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x73, 0x6c, 0x12, 0x12, 0x0a, + 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x62, 0x6f, 0x64, + 0x79, 0x12, 0x89, 0x01, 0x0a, 0x1e, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x72, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, + 0x6f, 0x64, 0x65, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, + 0x52, 0x1b, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x53, 0x0a, + 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x74, - 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, - 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x12, 0x56, 0x0a, 0x0c, 0x63, - 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x52, 0x0a, 0x10, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, - 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x27, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, - 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 
0x73, 0x12, 0x23, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, - 0x52, 0x0a, 0x69, 0x73, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x12, 0x56, 0x0a, 0x11, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, - 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x49, - 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x42, 0x02, - 0x18, 0x01, 0x52, 0x10, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x65, 0x72, 0x73, 0x12, 0x58, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, - 0x65, 0x6c, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x69, 0x6e, 0x67, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x90, 0x01, 0x0a, 0x1c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, + 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x78, - 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, - 0x19, 0x0a, 0x08, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x64, 0x12, 0x4c, 0x0a, 0x0d, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x2d, 0x0a, 0x0a, 0x50, 0x69, 0x6e, 0x67, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x5f, - 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x70, 0x69, 0x6e, - 0x67, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0xc8, 0x0b, 0x0a, 0x09, 0x48, 0x74, 0x74, 0x70, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x66, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x52, 0x0d, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, 
0x6f, 0x64, 0x12, 0x17, 0x0a, - 0x07, 0x75, 0x73, 0x65, 0x5f, 0x73, 0x73, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, - 0x75, 0x73, 0x65, 0x53, 0x73, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, - 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x62, - 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, - 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x61, 0x75, 0x74, 0x68, 0x49, 0x6e, - 0x66, 0x6f, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x61, 0x73, 0x6b, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6d, 0x61, 0x73, 0x6b, 0x48, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x58, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, - 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x1a, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x4d, 0x0a, 0x13, 0x42, 0x61, 0x73, 0x69, 0x63, 0x41, 0x75, + 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, + 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, + 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, + 0x77, 0x6f, 0x72, 0x64, 0x1a, 0xf6, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x05, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x75, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x50, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, - 0x60, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, - 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 
- 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, - 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x74, - 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, - 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x73, - 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x53, 0x73, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x89, 0x01, 0x0a, 0x1e, 0x61, 0x63, 0x63, - 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x44, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x1b, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, - 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, - 0x6f, 0x64, 0x65, 0x73, 0x12, 0x53, 0x0a, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, - 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, - 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x4d, 0x0a, 0x13, 0x42, 0x61, 0x73, - 0x69, 0x63, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, - 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x1a, 0xf6, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, - 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x75, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, - 0x6c, 0x61, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x50, 0x2e, 0x67, 0x6f, 0x6f, + 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x43, 
0x6c, 0x61, 0x73, 0x73, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x22, 0xb4, 0x01, 0x0a, 0x0b, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x54, 0x41, 0x54, 0x55, + 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, + 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x31, 0x58, 0x58, 0x10, 0x64, 0x12, 0x15, 0x0a, 0x10, 0x53, + 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x32, 0x58, 0x58, 0x10, + 0xc8, 0x01, 0x12, 0x15, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, + 0x53, 0x53, 0x5f, 0x33, 0x58, 0x58, 0x10, 0xac, 0x02, 0x12, 0x15, 0x0a, 0x10, 0x53, 0x54, 0x41, + 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x34, 0x58, 0x58, 0x10, 0x90, 0x03, + 0x12, 0x15, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, + 0x5f, 0x35, 0x58, 0x58, 0x10, 0xf4, 0x03, 0x12, 0x15, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, + 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x41, 0x4e, 0x59, 0x10, 0xe8, 0x07, 0x42, 0x0d, + 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x1a, 0x82, 0x02, + 0x0a, 0x1a, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, + 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7f, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x6b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, - 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x48, 0x00, 0x52, 0x0b, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x22, 0xb4, 0x01, 0x0a, 0x0b, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x1c, 0x0a, 0x18, 0x53, - 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, - 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x54, 0x41, - 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x31, 0x58, 0x58, 0x10, 0x64, 0x12, - 0x15, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, - 0x32, 0x58, 0x58, 0x10, 0xc8, 0x01, 0x12, 0x15, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, - 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x33, 0x58, 0x58, 0x10, 0xac, 0x02, 0x12, 0x15, 0x0a, - 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x34, 0x58, - 0x58, 0x10, 0x90, 0x03, 0x12, 0x15, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, - 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x35, 0x58, 0x58, 0x10, 0xf4, 0x03, 0x12, 0x15, 0x0a, 0x10, 0x53, - 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x41, 0x4e, 0x59, 0x10, - 0xe8, 0x07, 0x42, 0x0d, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, - 0x65, 0x1a, 0x3a, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3a, 0x0a, - 0x0d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x16, - 0x0a, 0x12, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, - 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x54, 0x10, 0x01, 0x12, - 0x08, 0x0a, 0x04, 0x50, 0x4f, 0x53, 0x54, 0x10, 0x02, 0x22, 0x47, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, - 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0f, - 0x0a, 0x0b, 0x55, 0x52, 0x4c, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x45, 0x44, 0x10, 0x01, 0x12, - 0x11, 0x0a, 0x0d, 0x55, 0x53, 0x45, 0x52, 0x5f, 0x50, 0x52, 0x4f, 0x56, 0x49, 0x44, 0x45, 0x44, - 0x10, 0x02, 0x1a, 0x73, 0x0a, 0x08, 0x54, 0x63, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x12, + 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, + 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x63, 0x0a, + 0x1e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, + 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x31, 0x0a, 0x2d, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x41, 0x47, 0x45, 0x4e, 0x54, + 0x5f, 0x41, 0x55, 0x54, 0x48, 0x45, 0x4e, 0x54, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x4f, 0x49, 0x44, 0x43, 0x5f, 0x54, 0x4f, 0x4b, 0x45, 0x4e, + 0x10, 0x01, 0x1a, 0x3a, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3a, + 0x0a, 0x0d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, + 0x16, 0x0a, 0x12, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x54, 0x10, 0x01, + 0x12, 0x08, 0x0a, 0x04, 0x50, 0x4f, 0x53, 0x54, 0x10, 0x02, 0x22, 0x47, 0x0a, 0x0b, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x0f, 0x0a, 0x0b, 0x55, 0x52, 0x4c, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x45, 0x44, 0x10, 0x01, + 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x53, 0x45, 0x52, 0x5f, 0x50, 0x52, 0x4f, 0x56, 0x49, 0x44, 0x45, + 0x44, 0x10, 0x02, 0x42, 0x0d, 0x0a, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x1a, 0x73, 0x0a, 0x08, 0x54, 0x63, 0x70, 0x43, 0x68, 0x65, 0x63, 
0x6b, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x53, 0x0a, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, @@ -2063,19 +2405,26 @@ var file_google_monitoring_v3_uptime_proto_rawDesc = []byte{ 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x53, 0x54, 0x41, 0x4e, 0x43, 0x45, 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, 0x41, 0x57, 0x53, 0x5f, 0x45, 0x4c, 0x42, 0x5f, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x42, 0x41, 0x4c, 0x41, 0x4e, 0x43, 0x45, 0x52, 0x10, 0x02, 0x42, - 0xc6, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, - 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x55, 0x70, - 0x74, 0x69, 0x6d, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, - 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, - 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, - 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, - 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, - 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, - 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0xaf, 0x02, 0xea, 0x41, 0x66, 0x0a, 0x26, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x66, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3c, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x7b, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x0a, 0x18, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, + 0x6f, 0x75, 0x64, 0x5c, 0x4d, 
0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, + 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, + 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, + 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2090,65 +2439,74 @@ func file_google_monitoring_v3_uptime_proto_rawDescGZIP() []byte { return file_google_monitoring_v3_uptime_proto_rawDescData } -var file_google_monitoring_v3_uptime_proto_enumTypes = make([]protoimpl.EnumInfo, 9) -var file_google_monitoring_v3_uptime_proto_msgTypes = make([]protoimpl.MessageInfo, 13) -var file_google_monitoring_v3_uptime_proto_goTypes = []interface{}{ - (UptimeCheckRegion)(0), // 0: google.monitoring.v3.UptimeCheckRegion - (GroupResourceType)(0), // 1: google.monitoring.v3.GroupResourceType - (InternalChecker_State)(0), // 2: google.monitoring.v3.InternalChecker.State - (UptimeCheckConfig_CheckerType)(0), // 3: google.monitoring.v3.UptimeCheckConfig.CheckerType - (UptimeCheckConfig_HttpCheck_RequestMethod)(0), // 4: google.monitoring.v3.UptimeCheckConfig.HttpCheck.RequestMethod - (UptimeCheckConfig_HttpCheck_ContentType)(0), // 5: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ContentType - (UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass)(0), // 6: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode.StatusClass - (UptimeCheckConfig_ContentMatcher_ContentMatcherOption)(0), // 7: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.ContentMatcherOption - (UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption)(0), // 8: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher.JsonPathMatcherOption - (*InternalChecker)(nil), // 9: google.monitoring.v3.InternalChecker - (*UptimeCheckConfig)(nil), // 10: google.monitoring.v3.UptimeCheckConfig - (*UptimeCheckIp)(nil), // 11: google.monitoring.v3.UptimeCheckIp - (*UptimeCheckConfig_ResourceGroup)(nil), // 12: google.monitoring.v3.UptimeCheckConfig.ResourceGroup - (*UptimeCheckConfig_PingConfig)(nil), // 13: google.monitoring.v3.UptimeCheckConfig.PingConfig - (*UptimeCheckConfig_HttpCheck)(nil), // 14: google.monitoring.v3.UptimeCheckConfig.HttpCheck - (*UptimeCheckConfig_TcpCheck)(nil), // 15: google.monitoring.v3.UptimeCheckConfig.TcpCheck - (*UptimeCheckConfig_ContentMatcher)(nil), // 16: google.monitoring.v3.UptimeCheckConfig.ContentMatcher - nil, // 17: google.monitoring.v3.UptimeCheckConfig.UserLabelsEntry - (*UptimeCheckConfig_HttpCheck_BasicAuthentication)(nil), // 18: google.monitoring.v3.UptimeCheckConfig.HttpCheck.BasicAuthentication - (*UptimeCheckConfig_HttpCheck_ResponseStatusCode)(nil), // 19: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode - nil, // 20: google.monitoring.v3.UptimeCheckConfig.HttpCheck.HeadersEntry - (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher)(nil), // 21: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher - (*monitoredres.MonitoredResource)(nil), // 22: google.api.MonitoredResource - (*durationpb.Duration)(nil), // 23: google.protobuf.Duration +var file_google_monitoring_v3_uptime_proto_enumTypes = make([]protoimpl.EnumInfo, 10) +var file_google_monitoring_v3_uptime_proto_msgTypes = make([]protoimpl.MessageInfo, 16) +var file_google_monitoring_v3_uptime_proto_goTypes = []any{ + (UptimeCheckRegion)(0), // 0: google.monitoring.v3.UptimeCheckRegion + (GroupResourceType)(0), // 1: google.monitoring.v3.GroupResourceType + (InternalChecker_State)(0), // 2: 
google.monitoring.v3.InternalChecker.State + (UptimeCheckConfig_CheckerType)(0), // 3: google.monitoring.v3.UptimeCheckConfig.CheckerType + (UptimeCheckConfig_HttpCheck_RequestMethod)(0), // 4: google.monitoring.v3.UptimeCheckConfig.HttpCheck.RequestMethod + (UptimeCheckConfig_HttpCheck_ContentType)(0), // 5: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ContentType + (UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass)(0), // 6: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode.StatusClass + (UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType)(0), // 7: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication.ServiceAgentAuthenticationType + (UptimeCheckConfig_ContentMatcher_ContentMatcherOption)(0), // 8: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.ContentMatcherOption + (UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption)(0), // 9: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher.JsonPathMatcherOption + (*InternalChecker)(nil), // 10: google.monitoring.v3.InternalChecker + (*SyntheticMonitorTarget)(nil), // 11: google.monitoring.v3.SyntheticMonitorTarget + (*UptimeCheckConfig)(nil), // 12: google.monitoring.v3.UptimeCheckConfig + (*UptimeCheckIp)(nil), // 13: google.monitoring.v3.UptimeCheckIp + (*SyntheticMonitorTarget_CloudFunctionV2Target)(nil), // 14: google.monitoring.v3.SyntheticMonitorTarget.CloudFunctionV2Target + (*UptimeCheckConfig_ResourceGroup)(nil), // 15: google.monitoring.v3.UptimeCheckConfig.ResourceGroup + (*UptimeCheckConfig_PingConfig)(nil), // 16: google.monitoring.v3.UptimeCheckConfig.PingConfig + (*UptimeCheckConfig_HttpCheck)(nil), // 17: google.monitoring.v3.UptimeCheckConfig.HttpCheck + (*UptimeCheckConfig_TcpCheck)(nil), // 18: google.monitoring.v3.UptimeCheckConfig.TcpCheck + (*UptimeCheckConfig_ContentMatcher)(nil), // 19: google.monitoring.v3.UptimeCheckConfig.ContentMatcher + nil, // 20: google.monitoring.v3.UptimeCheckConfig.UserLabelsEntry + (*UptimeCheckConfig_HttpCheck_BasicAuthentication)(nil), // 21: google.monitoring.v3.UptimeCheckConfig.HttpCheck.BasicAuthentication + (*UptimeCheckConfig_HttpCheck_ResponseStatusCode)(nil), // 22: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode + (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication)(nil), // 23: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication + nil, // 24: google.monitoring.v3.UptimeCheckConfig.HttpCheck.HeadersEntry + (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher)(nil), // 25: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher + (*monitoredres.MonitoredResource)(nil), // 26: google.api.MonitoredResource + (*durationpb.Duration)(nil), // 27: google.protobuf.Duration } var file_google_monitoring_v3_uptime_proto_depIdxs = []int32{ 2, // 0: google.monitoring.v3.InternalChecker.state:type_name -> google.monitoring.v3.InternalChecker.State - 22, // 1: google.monitoring.v3.UptimeCheckConfig.monitored_resource:type_name -> google.api.MonitoredResource - 12, // 2: google.monitoring.v3.UptimeCheckConfig.resource_group:type_name -> google.monitoring.v3.UptimeCheckConfig.ResourceGroup - 14, // 3: google.monitoring.v3.UptimeCheckConfig.http_check:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck - 15, // 4: google.monitoring.v3.UptimeCheckConfig.tcp_check:type_name -> google.monitoring.v3.UptimeCheckConfig.TcpCheck - 23, // 5: google.monitoring.v3.UptimeCheckConfig.period:type_name -> 
google.protobuf.Duration - 23, // 6: google.monitoring.v3.UptimeCheckConfig.timeout:type_name -> google.protobuf.Duration - 16, // 7: google.monitoring.v3.UptimeCheckConfig.content_matchers:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher - 3, // 8: google.monitoring.v3.UptimeCheckConfig.checker_type:type_name -> google.monitoring.v3.UptimeCheckConfig.CheckerType - 0, // 9: google.monitoring.v3.UptimeCheckConfig.selected_regions:type_name -> google.monitoring.v3.UptimeCheckRegion - 9, // 10: google.monitoring.v3.UptimeCheckConfig.internal_checkers:type_name -> google.monitoring.v3.InternalChecker - 17, // 11: google.monitoring.v3.UptimeCheckConfig.user_labels:type_name -> google.monitoring.v3.UptimeCheckConfig.UserLabelsEntry - 0, // 12: google.monitoring.v3.UptimeCheckIp.region:type_name -> google.monitoring.v3.UptimeCheckRegion - 1, // 13: google.monitoring.v3.UptimeCheckConfig.ResourceGroup.resource_type:type_name -> google.monitoring.v3.GroupResourceType - 4, // 14: google.monitoring.v3.UptimeCheckConfig.HttpCheck.request_method:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.RequestMethod - 18, // 15: google.monitoring.v3.UptimeCheckConfig.HttpCheck.auth_info:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.BasicAuthentication - 20, // 16: google.monitoring.v3.UptimeCheckConfig.HttpCheck.headers:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.HeadersEntry - 5, // 17: google.monitoring.v3.UptimeCheckConfig.HttpCheck.content_type:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ContentType - 19, // 18: google.monitoring.v3.UptimeCheckConfig.HttpCheck.accepted_response_status_codes:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode - 13, // 19: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ping_config:type_name -> google.monitoring.v3.UptimeCheckConfig.PingConfig - 13, // 20: google.monitoring.v3.UptimeCheckConfig.TcpCheck.ping_config:type_name -> google.monitoring.v3.UptimeCheckConfig.PingConfig - 7, // 21: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.matcher:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher.ContentMatcherOption - 21, // 22: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.json_path_matcher:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher - 6, // 23: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode.status_class:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode.StatusClass - 8, // 24: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher.json_matcher:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher.JsonPathMatcherOption - 25, // [25:25] is the sub-list for method output_type - 25, // [25:25] is the sub-list for method input_type - 25, // [25:25] is the sub-list for extension type_name - 25, // [25:25] is the sub-list for extension extendee - 0, // [0:25] is the sub-list for field type_name + 14, // 1: google.monitoring.v3.SyntheticMonitorTarget.cloud_function_v2:type_name -> google.monitoring.v3.SyntheticMonitorTarget.CloudFunctionV2Target + 26, // 2: google.monitoring.v3.UptimeCheckConfig.monitored_resource:type_name -> google.api.MonitoredResource + 15, // 3: google.monitoring.v3.UptimeCheckConfig.resource_group:type_name -> google.monitoring.v3.UptimeCheckConfig.ResourceGroup + 11, // 4: google.monitoring.v3.UptimeCheckConfig.synthetic_monitor:type_name -> google.monitoring.v3.SyntheticMonitorTarget + 
17, // 5: google.monitoring.v3.UptimeCheckConfig.http_check:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck + 18, // 6: google.monitoring.v3.UptimeCheckConfig.tcp_check:type_name -> google.monitoring.v3.UptimeCheckConfig.TcpCheck + 27, // 7: google.monitoring.v3.UptimeCheckConfig.period:type_name -> google.protobuf.Duration + 27, // 8: google.monitoring.v3.UptimeCheckConfig.timeout:type_name -> google.protobuf.Duration + 19, // 9: google.monitoring.v3.UptimeCheckConfig.content_matchers:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher + 3, // 10: google.monitoring.v3.UptimeCheckConfig.checker_type:type_name -> google.monitoring.v3.UptimeCheckConfig.CheckerType + 0, // 11: google.monitoring.v3.UptimeCheckConfig.selected_regions:type_name -> google.monitoring.v3.UptimeCheckRegion + 10, // 12: google.monitoring.v3.UptimeCheckConfig.internal_checkers:type_name -> google.monitoring.v3.InternalChecker + 20, // 13: google.monitoring.v3.UptimeCheckConfig.user_labels:type_name -> google.monitoring.v3.UptimeCheckConfig.UserLabelsEntry + 0, // 14: google.monitoring.v3.UptimeCheckIp.region:type_name -> google.monitoring.v3.UptimeCheckRegion + 26, // 15: google.monitoring.v3.SyntheticMonitorTarget.CloudFunctionV2Target.cloud_run_revision:type_name -> google.api.MonitoredResource + 1, // 16: google.monitoring.v3.UptimeCheckConfig.ResourceGroup.resource_type:type_name -> google.monitoring.v3.GroupResourceType + 4, // 17: google.monitoring.v3.UptimeCheckConfig.HttpCheck.request_method:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.RequestMethod + 21, // 18: google.monitoring.v3.UptimeCheckConfig.HttpCheck.auth_info:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.BasicAuthentication + 24, // 19: google.monitoring.v3.UptimeCheckConfig.HttpCheck.headers:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.HeadersEntry + 5, // 20: google.monitoring.v3.UptimeCheckConfig.HttpCheck.content_type:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ContentType + 22, // 21: google.monitoring.v3.UptimeCheckConfig.HttpCheck.accepted_response_status_codes:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode + 16, // 22: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ping_config:type_name -> google.monitoring.v3.UptimeCheckConfig.PingConfig + 23, // 23: google.monitoring.v3.UptimeCheckConfig.HttpCheck.service_agent_authentication:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication + 16, // 24: google.monitoring.v3.UptimeCheckConfig.TcpCheck.ping_config:type_name -> google.monitoring.v3.UptimeCheckConfig.PingConfig + 8, // 25: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.matcher:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher.ContentMatcherOption + 25, // 26: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.json_path_matcher:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher + 6, // 27: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode.status_class:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode.StatusClass + 7, // 28: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication.type:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication.ServiceAgentAuthenticationType + 9, // 29: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher.json_matcher:type_name -> 
google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher.JsonPathMatcherOption + 30, // [30:30] is the sub-list for method output_type + 30, // [30:30] is the sub-list for method input_type + 30, // [30:30] is the sub-list for extension type_name + 30, // [30:30] is the sub-list for extension extendee + 0, // [0:30] is the sub-list for field type_name } func init() { file_google_monitoring_v3_uptime_proto_init() } @@ -2157,7 +2515,7 @@ func file_google_monitoring_v3_uptime_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_uptime_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*InternalChecker); i { case 0: return &v.state @@ -2169,7 +2527,19 @@ func file_google_monitoring_v3_uptime_proto_init() { return nil } } - file_google_monitoring_v3_uptime_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*SyntheticMonitorTarget); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*UptimeCheckConfig); i { case 0: return &v.state @@ -2181,7 +2551,7 @@ func file_google_monitoring_v3_uptime_proto_init() { return nil } } - file_google_monitoring_v3_uptime_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*UptimeCheckIp); i { case 0: return &v.state @@ -2193,7 +2563,19 @@ func file_google_monitoring_v3_uptime_proto_init() { return nil } } - file_google_monitoring_v3_uptime_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*SyntheticMonitorTarget_CloudFunctionV2Target); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*UptimeCheckConfig_ResourceGroup); i { case 0: return &v.state @@ -2205,7 +2587,7 @@ func file_google_monitoring_v3_uptime_proto_init() { return nil } } - file_google_monitoring_v3_uptime_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*UptimeCheckConfig_PingConfig); i { case 0: return &v.state @@ -2217,7 +2599,7 @@ func file_google_monitoring_v3_uptime_proto_init() { return nil } } - file_google_monitoring_v3_uptime_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*UptimeCheckConfig_HttpCheck); i { case 0: return &v.state @@ -2229,7 +2611,7 @@ func file_google_monitoring_v3_uptime_proto_init() { return nil } } - file_google_monitoring_v3_uptime_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*UptimeCheckConfig_TcpCheck); i { case 0: return &v.state @@ -2241,7 +2623,7 @@ func file_google_monitoring_v3_uptime_proto_init() 
{ return nil } } - file_google_monitoring_v3_uptime_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*UptimeCheckConfig_ContentMatcher); i { case 0: return &v.state @@ -2253,7 +2635,7 @@ func file_google_monitoring_v3_uptime_proto_init() { return nil } } - file_google_monitoring_v3_uptime_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*UptimeCheckConfig_HttpCheck_BasicAuthentication); i { case 0: return &v.state @@ -2265,7 +2647,7 @@ func file_google_monitoring_v3_uptime_proto_init() { return nil } } - file_google_monitoring_v3_uptime_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*UptimeCheckConfig_HttpCheck_ResponseStatusCode); i { case 0: return &v.state @@ -2277,7 +2659,19 @@ func file_google_monitoring_v3_uptime_proto_init() { return nil } } - file_google_monitoring_v3_uptime_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_proto_msgTypes[13].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v := v.(*UptimeCheckConfig_ContentMatcher_JsonPathMatcher); i { case 0: return &v.state @@ -2290,16 +2684,23 @@ func file_google_monitoring_v3_uptime_proto_init() { } } } - file_google_monitoring_v3_uptime_proto_msgTypes[1].OneofWrappers = []interface{}{ + file_google_monitoring_v3_uptime_proto_msgTypes[1].OneofWrappers = []any{ + (*SyntheticMonitorTarget_CloudFunctionV2)(nil), + } + file_google_monitoring_v3_uptime_proto_msgTypes[2].OneofWrappers = []any{ (*UptimeCheckConfig_MonitoredResource)(nil), (*UptimeCheckConfig_ResourceGroup_)(nil), + (*UptimeCheckConfig_SyntheticMonitor)(nil), (*UptimeCheckConfig_HttpCheck_)(nil), (*UptimeCheckConfig_TcpCheck_)(nil), } - file_google_monitoring_v3_uptime_proto_msgTypes[7].OneofWrappers = []interface{}{ + file_google_monitoring_v3_uptime_proto_msgTypes[7].OneofWrappers = []any{ + (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_)(nil), + } + file_google_monitoring_v3_uptime_proto_msgTypes[9].OneofWrappers = []any{ (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher_)(nil), } - file_google_monitoring_v3_uptime_proto_msgTypes[10].OneofWrappers = []interface{}{ + file_google_monitoring_v3_uptime_proto_msgTypes[12].OneofWrappers = []any{ (*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusValue)(nil), (*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_)(nil), } @@ -2308,8 +2709,8 @@ func file_google_monitoring_v3_uptime_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_monitoring_v3_uptime_proto_rawDesc, - NumEnums: 9, - NumMessages: 13, + NumEnums: 10, + NumMessages: 16, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go index d0ca8ee494f..d4ba902fb07 100644 --- 
a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/monitoring/v3/uptime_service.proto package monitoringpb @@ -778,7 +778,7 @@ func file_google_monitoring_v3_uptime_service_proto_rawDescGZIP() []byte { } var file_google_monitoring_v3_uptime_service_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_google_monitoring_v3_uptime_service_proto_goTypes = []interface{}{ +var file_google_monitoring_v3_uptime_service_proto_goTypes = []any{ (*ListUptimeCheckConfigsRequest)(nil), // 0: google.monitoring.v3.ListUptimeCheckConfigsRequest (*ListUptimeCheckConfigsResponse)(nil), // 1: google.monitoring.v3.ListUptimeCheckConfigsResponse (*GetUptimeCheckConfigRequest)(nil), // 2: google.monitoring.v3.GetUptimeCheckConfigRequest @@ -824,7 +824,7 @@ func file_google_monitoring_v3_uptime_service_proto_init() { } file_google_monitoring_v3_uptime_proto_init() if !protoimpl.UnsafeEnabled { - file_google_monitoring_v3_uptime_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_service_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*ListUptimeCheckConfigsRequest); i { case 0: return &v.state @@ -836,7 +836,7 @@ func file_google_monitoring_v3_uptime_service_proto_init() { return nil } } - file_google_monitoring_v3_uptime_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_service_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ListUptimeCheckConfigsResponse); i { case 0: return &v.state @@ -848,7 +848,7 @@ func file_google_monitoring_v3_uptime_service_proto_init() { return nil } } - file_google_monitoring_v3_uptime_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_service_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*GetUptimeCheckConfigRequest); i { case 0: return &v.state @@ -860,7 +860,7 @@ func file_google_monitoring_v3_uptime_service_proto_init() { return nil } } - file_google_monitoring_v3_uptime_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_service_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*CreateUptimeCheckConfigRequest); i { case 0: return &v.state @@ -872,7 +872,7 @@ func file_google_monitoring_v3_uptime_service_proto_init() { return nil } } - file_google_monitoring_v3_uptime_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_service_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*UpdateUptimeCheckConfigRequest); i { case 0: return &v.state @@ -884,7 +884,7 @@ func file_google_monitoring_v3_uptime_service_proto_init() { return nil } } - file_google_monitoring_v3_uptime_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_service_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := 
v.(*DeleteUptimeCheckConfigRequest); i { case 0: return &v.state @@ -896,7 +896,7 @@ func file_google_monitoring_v3_uptime_service_proto_init() { return nil } } - file_google_monitoring_v3_uptime_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_service_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*ListUptimeCheckIpsRequest); i { case 0: return &v.state @@ -908,7 +908,7 @@ func file_google_monitoring_v3_uptime_service_proto_init() { return nil } } - file_google_monitoring_v3_uptime_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_monitoring_v3_uptime_service_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*ListUptimeCheckIpsResponse); i { case 0: return &v.state diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go index b1e6939033b..6f7fe5d7c49 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go @@ -59,6 +59,7 @@ func defaultNotificationChannelGRPCClientOptions() []option.ClientOption { internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } @@ -377,7 +378,9 @@ func (c *notificationChannelGRPCClient) Connection() *grpc.ClientConn { func (c *notificationChannelGRPCClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go index 3a924eb446e..3c111637e19 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go @@ -48,6 +48,7 @@ func defaultQueryGRPCClientOptions() []option.ClientOption { internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } @@ -174,7 +175,9 @@ func (c *queryGRPCClient) Connection() *grpc.ClientConn { func (c *queryGRPCClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. 
The user should invoke this when diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go index 819f6d767ca..7776c425f9f 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go @@ -59,6 +59,7 @@ func defaultServiceMonitoringGRPCClientOptions() []option.ClientOption { internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } @@ -324,7 +325,9 @@ func (c *serviceMonitoringGRPCClient) Connection() *grpc.ClientConn { func (c *serviceMonitoringGRPCClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go index 958406f3421..32cad577e3f 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go @@ -53,6 +53,7 @@ func defaultSnoozeGRPCClientOptions() []option.ClientOption { internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } @@ -230,7 +231,9 @@ func (c *snoozeGRPCClient) Connection() *grpc.ClientConn { func (c *snoozeGRPCClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go index 05fc20350d0..d3815251374 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go @@ -55,6 +55,7 @@ func defaultUptimeCheckGRPCClientOptions() []option.ClientOption { internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } @@ -280,7 +281,9 @@ func (c *uptimeCheckGRPCClient) Connection() *grpc.ClientConn { func (c *uptimeCheckGRPCClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) 
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/monitoring/internal/version.go b/vendor/cloud.google.com/go/monitoring/internal/version.go index 197f7f8d335..a8a51fdaa7a 100644 --- a/vendor/cloud.google.com/go/monitoring/internal/version.go +++ b/vendor/cloud.google.com/go/monitoring/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.18.2" +const Version = "1.20.3" diff --git a/vendor/cloud.google.com/go/secretmanager/apiv1/auxiliary.go b/vendor/cloud.google.com/go/secretmanager/apiv1/auxiliary.go index 1e842c9fd65..2bc1b35c410 100644 --- a/vendor/cloud.google.com/go/secretmanager/apiv1/auxiliary.go +++ b/vendor/cloud.google.com/go/secretmanager/apiv1/auxiliary.go @@ -19,8 +19,56 @@ package secretmanager import ( secretmanagerpb "cloud.google.com/go/secretmanager/apiv1/secretmanagerpb" "google.golang.org/api/iterator" + locationpb "google.golang.org/genproto/googleapis/cloud/location" ) +// LocationIterator manages a stream of *locationpb.Location. +type LocationIterator struct { + items []*locationpb.Location + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*locationpb.Location, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *LocationIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *LocationIterator) Next() (*locationpb.Location, error) { + var item *locationpb.Location + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *LocationIterator) bufLen() int { + return len(it.items) +} + +func (it *LocationIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + // SecretIterator manages a stream of *secretmanagerpb.Secret. 
type SecretIterator struct { items []*secretmanagerpb.Secret diff --git a/vendor/cloud.google.com/go/secretmanager/apiv1/gapic_metadata.json b/vendor/cloud.google.com/go/secretmanager/apiv1/gapic_metadata.json index 56579a4afdd..897e5d7345d 100644 --- a/vendor/cloud.google.com/go/secretmanager/apiv1/gapic_metadata.json +++ b/vendor/cloud.google.com/go/secretmanager/apiv1/gapic_metadata.json @@ -50,6 +50,11 @@ "GetIamPolicy" ] }, + "GetLocation": { + "methods": [ + "GetLocation" + ] + }, "GetSecret": { "methods": [ "GetSecret" @@ -60,6 +65,11 @@ "GetSecretVersion" ] }, + "ListLocations": { + "methods": [ + "ListLocations" + ] + }, "ListSecretVersions": { "methods": [ "ListSecretVersions" @@ -130,6 +140,11 @@ "GetIamPolicy" ] }, + "GetLocation": { + "methods": [ + "GetLocation" + ] + }, "GetSecret": { "methods": [ "GetSecret" @@ -140,6 +155,11 @@ "GetSecretVersion" ] }, + "ListLocations": { + "methods": [ + "ListLocations" + ] + }, "ListSecretVersions": { "methods": [ "ListSecretVersions" diff --git a/vendor/cloud.google.com/go/secretmanager/apiv1/secret_manager_client.go b/vendor/cloud.google.com/go/secretmanager/apiv1/secret_manager_client.go index 4e866e66d35..a1af6ca462c 100644 --- a/vendor/cloud.google.com/go/secretmanager/apiv1/secret_manager_client.go +++ b/vendor/cloud.google.com/go/secretmanager/apiv1/secret_manager_client.go @@ -35,6 +35,7 @@ import ( "google.golang.org/api/option/internaloption" gtransport "google.golang.org/api/transport/grpc" httptransport "google.golang.org/api/transport/http" + locationpb "google.golang.org/genproto/googleapis/cloud/location" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/protobuf/encoding/protojson" @@ -60,6 +61,8 @@ type CallOptions struct { SetIamPolicy []gax.CallOption GetIamPolicy []gax.CallOption TestIamPermissions []gax.CallOption + GetLocation []gax.CallOption + ListLocations []gax.CallOption } func defaultGRPCClientOptions() []option.ClientOption { @@ -71,6 +74,7 @@ func defaultGRPCClientOptions() []option.ClientOption { internaloption.WithDefaultAudience("https://secretmanager.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } @@ -133,6 +137,8 @@ func defaultCallOptions() *CallOptions { TestIamPermissions: []gax.CallOption{ gax.WithTimeout(60000 * time.Millisecond), }, + GetLocation: []gax.CallOption{}, + ListLocations: []gax.CallOption{}, } } @@ -192,6 +198,8 @@ func defaultRESTCallOptions() *CallOptions { TestIamPermissions: []gax.CallOption{ gax.WithTimeout(60000 * time.Millisecond), }, + GetLocation: []gax.CallOption{}, + ListLocations: []gax.CallOption{}, } } @@ -215,6 +223,8 @@ type internalClient interface { SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) + GetLocation(context.Context, *locationpb.GetLocationRequest, ...gax.CallOption) (*locationpb.Location, error) + ListLocations(context.Context, *locationpb.ListLocationsRequest, ...gax.CallOption) *LocationIterator } // Client is a client for interacting with Secret Manager API. 
@@ -264,13 +274,15 @@ func (c *Client) ListSecrets(ctx context.Context, req *secretmanagerpb.ListSecre return c.internalClient.ListSecrets(ctx, req, opts...) } -// CreateSecret creates a new Secret containing no SecretVersions. +// CreateSecret creates a new Secret containing no +// SecretVersions. func (c *Client) CreateSecret(ctx context.Context, req *secretmanagerpb.CreateSecretRequest, opts ...gax.CallOption) (*secretmanagerpb.Secret, error) { return c.internalClient.CreateSecret(ctx, req, opts...) } -// AddSecretVersion creates a new SecretVersion containing secret data and attaches -// it to an existing Secret. +// AddSecretVersion creates a new SecretVersion +// containing secret data and attaches it to an existing +// Secret. func (c *Client) AddSecretVersion(ctx context.Context, req *secretmanagerpb.AddSecretVersionRequest, opts ...gax.CallOption) (*secretmanagerpb.SecretVersion, error) { return c.internalClient.AddSecretVersion(ctx, req, opts...) } @@ -280,7 +292,8 @@ func (c *Client) GetSecret(ctx context.Context, req *secretmanagerpb.GetSecretRe return c.internalClient.GetSecret(ctx, req, opts...) } -// UpdateSecret updates metadata of an existing Secret. +// UpdateSecret updates metadata of an existing +// Secret. func (c *Client) UpdateSecret(ctx context.Context, req *secretmanagerpb.UpdateSecretRequest, opts ...gax.CallOption) (*secretmanagerpb.Secret, error) { return c.internalClient.UpdateSecret(ctx, req, opts...) } @@ -290,13 +303,14 @@ func (c *Client) DeleteSecret(ctx context.Context, req *secretmanagerpb.DeleteSe return c.internalClient.DeleteSecret(ctx, req, opts...) } -// ListSecretVersions lists SecretVersions. This call does not return secret -// data. +// ListSecretVersions lists SecretVersions. This +// call does not return secret data. func (c *Client) ListSecretVersions(ctx context.Context, req *secretmanagerpb.ListSecretVersionsRequest, opts ...gax.CallOption) *SecretVersionIterator { return c.internalClient.ListSecretVersions(ctx, req, opts...) } -// GetSecretVersion gets metadata for a SecretVersion. +// GetSecretVersion gets metadata for a +// SecretVersion. // // projects/*/secrets/*/versions/latest is an alias to the most recently // created SecretVersion. @@ -304,7 +318,8 @@ func (c *Client) GetSecretVersion(ctx context.Context, req *secretmanagerpb.GetS return c.internalClient.GetSecretVersion(ctx, req, opts...) } -// AccessSecretVersion accesses a SecretVersion. This call returns the secret data. +// AccessSecretVersion accesses a SecretVersion. +// This call returns the secret data. // // projects/*/secrets/*/versions/latest is an alias to the most recently // created SecretVersion. @@ -314,7 +329,8 @@ func (c *Client) AccessSecretVersion(ctx context.Context, req *secretmanagerpb.A // DisableSecretVersion disables a SecretVersion. // -// Sets the state of the SecretVersion to +// Sets the state of the +// SecretVersion to // DISABLED. func (c *Client) DisableSecretVersion(ctx context.Context, req *secretmanagerpb.DisableSecretVersionRequest, opts ...gax.CallOption) (*secretmanagerpb.SecretVersion, error) { return c.internalClient.DisableSecretVersion(ctx, req, opts...) @@ -322,7 +338,8 @@ func (c *Client) DisableSecretVersion(ctx context.Context, req *secretmanagerpb. // EnableSecretVersion enables a SecretVersion. // -// Sets the state of the SecretVersion to +// Sets the state of the +// SecretVersion to // ENABLED. 
func (c *Client) EnableSecretVersion(ctx context.Context, req *secretmanagerpb.EnableSecretVersionRequest, opts ...gax.CallOption) (*secretmanagerpb.SecretVersion, error) { return c.internalClient.EnableSecretVersion(ctx, req, opts...) @@ -330,9 +347,10 @@ func (c *Client) EnableSecretVersion(ctx context.Context, req *secretmanagerpb.E // DestroySecretVersion destroys a SecretVersion. // -// Sets the state of the SecretVersion to -// DESTROYED and irrevocably destroys the -// secret data. +// Sets the state of the +// SecretVersion to +// DESTROYED +// and irrevocably destroys the secret data. func (c *Client) DestroySecretVersion(ctx context.Context, req *secretmanagerpb.DestroySecretVersionRequest, opts ...gax.CallOption) (*secretmanagerpb.SecretVersion, error) { return c.internalClient.DestroySecretVersion(ctx, req, opts...) } @@ -340,8 +358,10 @@ func (c *Client) DestroySecretVersion(ctx context.Context, req *secretmanagerpb. // SetIamPolicy sets the access control policy on the specified secret. Replaces any // existing policy. // -// Permissions on SecretVersions are enforced according -// to the policy set on the associated Secret. +// Permissions on +// SecretVersions are enforced +// according to the policy set on the associated +// Secret. func (c *Client) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { return c.internalClient.SetIamPolicy(ctx, req, opts...) } @@ -363,6 +383,16 @@ func (c *Client) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermi return c.internalClient.TestIamPermissions(ctx, req, opts...) } +// GetLocation gets information about a location. +func (c *Client) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) { + return c.internalClient.GetLocation(ctx, req, opts...) +} + +// ListLocations lists information about the supported locations for this service. +func (c *Client) ListLocations(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) *LocationIterator { + return c.internalClient.ListLocations(ctx, req, opts...) +} + // gRPCClient is a client for interacting with Secret Manager API over gRPC transport. // // Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. @@ -376,6 +406,8 @@ type gRPCClient struct { // The gRPC API client. client secretmanagerpb.SecretManagerServiceClient + locationsClient locationpb.LocationsClient + // The x-goog-* metadata to be sent with each request. xGoogHeaders []string } @@ -408,9 +440,10 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error client := Client{CallOptions: defaultCallOptions()} c := &gRPCClient{ - connPool: connPool, - client: secretmanagerpb.NewSecretManagerServiceClient(connPool), - CallOptions: &client.CallOptions, + connPool: connPool, + client: secretmanagerpb.NewSecretManagerServiceClient(connPool), + CallOptions: &client.CallOptions, + locationsClient: locationpb.NewLocationsClient(connPool), } c.setGoogleClientInfo() @@ -433,7 +466,9 @@ func (c *gRPCClient) Connection() *grpc.ClientConn { func (c *gRPCClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) 
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when @@ -493,6 +528,7 @@ func defaultRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://secretmanager.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -502,7 +538,9 @@ func defaultRESTClientOptions() []option.ClientOption { func (c *restClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when @@ -841,6 +879,70 @@ func (c *gRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamP return resp, nil } +func (c *gRPCClient) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetLocation[0:len((*c.CallOptions).GetLocation):len((*c.CallOptions).GetLocation)], opts...) + var resp *locationpb.Location + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.locationsClient.GetLocation(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) ListLocations(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) *LocationIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListLocations[0:len((*c.CallOptions).ListLocations):len((*c.CallOptions).ListLocations)], opts...) + it := &LocationIterator{} + req = proto.Clone(req).(*locationpb.ListLocationsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*locationpb.Location, string, error) { + resp := &locationpb.ListLocationsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.locationsClient.ListLocations(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetLocations(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) 
+ return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + // ListSecrets lists Secrets. func (c *restClient) ListSecrets(ctx context.Context, req *secretmanagerpb.ListSecretsRequest, opts ...gax.CallOption) *SecretIterator { it := &SecretIterator{} @@ -933,7 +1035,8 @@ func (c *restClient) ListSecrets(ctx context.Context, req *secretmanagerpb.ListS return it } -// CreateSecret creates a new Secret containing no SecretVersions. +// CreateSecret creates a new Secret containing no +// SecretVersions. func (c *restClient) CreateSecret(ctx context.Context, req *secretmanagerpb.CreateSecretRequest, opts ...gax.CallOption) (*secretmanagerpb.Secret, error) { m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} body := req.GetSecret() @@ -1001,8 +1104,9 @@ func (c *restClient) CreateSecret(ctx context.Context, req *secretmanagerpb.Crea return resp, nil } -// AddSecretVersion creates a new SecretVersion containing secret data and attaches -// it to an existing Secret. +// AddSecretVersion creates a new SecretVersion +// containing secret data and attaches it to an existing +// Secret. func (c *restClient) AddSecretVersion(ctx context.Context, req *secretmanagerpb.AddSecretVersionRequest, opts ...gax.CallOption) (*secretmanagerpb.SecretVersion, error) { m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} jsonReq, err := m.Marshal(req) @@ -1128,7 +1232,8 @@ func (c *restClient) GetSecret(ctx context.Context, req *secretmanagerpb.GetSecr return resp, nil } -// UpdateSecret updates metadata of an existing Secret. +// UpdateSecret updates metadata of an existing +// Secret. func (c *restClient) UpdateSecret(ctx context.Context, req *secretmanagerpb.UpdateSecretRequest, opts ...gax.CallOption) (*secretmanagerpb.Secret, error) { m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} body := req.GetSecret() @@ -1247,8 +1352,8 @@ func (c *restClient) DeleteSecret(ctx context.Context, req *secretmanagerpb.Dele }, opts...) } -// ListSecretVersions lists SecretVersions. This call does not return secret -// data. +// ListSecretVersions lists SecretVersions. This +// call does not return secret data. func (c *restClient) ListSecretVersions(ctx context.Context, req *secretmanagerpb.ListSecretVersionsRequest, opts ...gax.CallOption) *SecretVersionIterator { it := &SecretVersionIterator{} req = proto.Clone(req).(*secretmanagerpb.ListSecretVersionsRequest) @@ -1340,7 +1445,8 @@ func (c *restClient) ListSecretVersions(ctx context.Context, req *secretmanagerp return it } -// GetSecretVersion gets metadata for a SecretVersion. +// GetSecretVersion gets metadata for a +// SecretVersion. // // projects/*/secrets/*/versions/latest is an alias to the most recently // created SecretVersion. @@ -1403,7 +1509,8 @@ func (c *restClient) GetSecretVersion(ctx context.Context, req *secretmanagerpb. return resp, nil } -// AccessSecretVersion accesses a SecretVersion. This call returns the secret data. +// AccessSecretVersion accesses a SecretVersion. +// This call returns the secret data. // // projects/*/secrets/*/versions/latest is an alias to the most recently // created SecretVersion. @@ -1468,7 +1575,8 @@ func (c *restClient) AccessSecretVersion(ctx context.Context, req *secretmanager // DisableSecretVersion disables a SecretVersion. 
// -// Sets the state of the SecretVersion to +// Sets the state of the +// SecretVersion to // DISABLED. func (c *restClient) DisableSecretVersion(ctx context.Context, req *secretmanagerpb.DisableSecretVersionRequest, opts ...gax.CallOption) (*secretmanagerpb.SecretVersion, error) { m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} @@ -1537,7 +1645,8 @@ func (c *restClient) DisableSecretVersion(ctx context.Context, req *secretmanage // EnableSecretVersion enables a SecretVersion. // -// Sets the state of the SecretVersion to +// Sets the state of the +// SecretVersion to // ENABLED. func (c *restClient) EnableSecretVersion(ctx context.Context, req *secretmanagerpb.EnableSecretVersionRequest, opts ...gax.CallOption) (*secretmanagerpb.SecretVersion, error) { m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} @@ -1606,9 +1715,10 @@ func (c *restClient) EnableSecretVersion(ctx context.Context, req *secretmanager // DestroySecretVersion destroys a SecretVersion. // -// Sets the state of the SecretVersion to -// DESTROYED and irrevocably destroys the -// secret data. +// Sets the state of the +// SecretVersion to +// DESTROYED +// and irrevocably destroys the secret data. func (c *restClient) DestroySecretVersion(ctx context.Context, req *secretmanagerpb.DestroySecretVersionRequest, opts ...gax.CallOption) (*secretmanagerpb.SecretVersion, error) { m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} jsonReq, err := m.Marshal(req) @@ -1677,8 +1787,10 @@ func (c *restClient) DestroySecretVersion(ctx context.Context, req *secretmanage // SetIamPolicy sets the access control policy on the specified secret. Replaces any // existing policy. // -// Permissions on SecretVersions are enforced according -// to the policy set on the associated Secret. +// Permissions on +// SecretVersions are enforced +// according to the policy set on the associated +// Secret. func (c *restClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} jsonReq, err := m.Marshal(req) @@ -1879,3 +1991,155 @@ func (c *restClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamP } return resp, nil } + +// GetLocation gets information about a location. +func (c *restClient) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).GetLocation[0:len((*c.CallOptions).GetLocation):len((*c.CallOptions).GetLocation)], opts...) 
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &locationpb.Location{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// ListLocations lists information about the supported locations for this service. +func (c *restClient) ListLocations(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) *LocationIterator { + it := &LocationIterator{} + req = proto.Clone(req).(*locationpb.ListLocationsRequest) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + it.InternalFetch = func(pageSize int, pageToken string) ([]*locationpb.Location, string, error) { + resp := &locationpb.ListLocationsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, "", err + } + baseUrl.Path += fmt.Sprintf("/v1/%v/locations", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + if req.GetFilter() != "" { + params.Add("filter", fmt.Sprintf("%v", req.GetFilter())) + } + if req.GetPageSize() != 0 { + params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize())) + } + if req.GetPageToken() != "" { + params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken())) + } + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := append(c.xGoogHeaders, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, "", e + } + it.Response = resp + return resp.GetLocations(), resp.GetNextPageToken(), nil + } + + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) 
+ return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} diff --git a/vendor/cloud.google.com/go/secretmanager/apiv1/secretmanagerpb/resources.pb.go b/vendor/cloud.google.com/go/secretmanager/apiv1/secretmanagerpb/resources.pb.go index fe16ac68f60..0498621b9cc 100644 --- a/vendor/cloud.google.com/go/secretmanager/apiv1/secretmanagerpb/resources.pb.go +++ b/vendor/cloud.google.com/go/secretmanager/apiv1/secretmanagerpb/resources.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/cloud/secretmanager/v1/resources.proto package secretmanagerpb @@ -119,7 +119,7 @@ type Secret struct { // [Secret][google.cloud.secretmanager.v1.Secret] in the format // `projects/*/secrets/*`. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Required. Immutable. The replication policy of the secret data attached to + // Optional. Immutable. The replication policy of the secret data attached to // the [Secret][google.cloud.secretmanager.v1.Secret]. // // The replication policy cannot be changed after the Secret has been created. @@ -175,7 +175,7 @@ type Secret struct { // No more than 50 aliases can be assigned to a given secret. // // Version-Alias pairs will be viewable via GetSecret and modifiable via - // UpdateSecret. At launch access by alias will only be supported on + // UpdateSecret. Access by alias is only be supported on // GetSecretVersion and AccessSecretVersion. VersionAliases map[string]int64 `protobuf:"bytes,11,rep,name=version_aliases,json=versionAliases,proto3" json:"version_aliases,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` // Optional. Custom metadata about the secret. @@ -191,6 +191,23 @@ type Secret struct { // // The total size of annotation keys and values must be less than 16KiB. Annotations map[string]string `protobuf:"bytes,13,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Optional. Secret Version TTL after destruction request + // + // This is a part of the Delayed secret version destroy feature. + // For secret with TTL>0, version destruction doesn't happen immediately + // on calling destroy instead the version goes to a disabled state and + // destruction happens after the TTL expires. + VersionDestroyTtl *durationpb.Duration `protobuf:"bytes,14,opt,name=version_destroy_ttl,json=versionDestroyTtl,proto3" json:"version_destroy_ttl,omitempty"` + // Optional. The customer-managed encryption configuration of the Regionalised + // Secrets. If no configuration is provided, Google-managed default encryption + // is used. + // + // Updates to the [Secret][google.cloud.secretmanager.v1.Secret] encryption + // configuration only apply to + // [SecretVersions][google.cloud.secretmanager.v1.SecretVersion] added + // afterwards. They do not apply retroactively to existing + // [SecretVersions][google.cloud.secretmanager.v1.SecretVersion]. 
+ CustomerManagedEncryption *CustomerManagedEncryption `protobuf:"bytes,15,opt,name=customer_managed_encryption,json=customerManagedEncryption,proto3" json:"customer_managed_encryption,omitempty"` } func (x *Secret) Reset() { @@ -309,6 +326,20 @@ func (x *Secret) GetAnnotations() map[string]string { return nil } +func (x *Secret) GetVersionDestroyTtl() *durationpb.Duration { + if x != nil { + return x.VersionDestroyTtl + } + return nil +} + +func (x *Secret) GetCustomerManagedEncryption() *CustomerManagedEncryption { + if x != nil { + return x.CustomerManagedEncryption + } + return nil +} + type isSecret_Expiration interface { isSecret_Expiration() } @@ -369,6 +400,18 @@ type SecretVersion struct { // on // [SecretManagerService.AddSecretVersion][google.cloud.secretmanager.v1.SecretManagerService.AddSecretVersion]. ClientSpecifiedPayloadChecksum bool `protobuf:"varint,7,opt,name=client_specified_payload_checksum,json=clientSpecifiedPayloadChecksum,proto3" json:"client_specified_payload_checksum,omitempty"` + // Optional. Output only. Scheduled destroy time for secret version. + // This is a part of the Delayed secret version destroy feature. For a + // Secret with a valid version destroy TTL, when a secert version is + // destroyed, the version is moved to disabled state and it is scheduled for + // destruction. The version is destroyed only after the + // `scheduled_destroy_time`. + ScheduledDestroyTime *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=scheduled_destroy_time,json=scheduledDestroyTime,proto3" json:"scheduled_destroy_time,omitempty"` + // Output only. The customer-managed encryption status of the + // [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]. Only + // populated if customer-managed encryption is used and + // [Secret][google.cloud.secretmanager.v1.Secret] is a Regionalised Secret. + CustomerManagedEncryption *CustomerManagedEncryptionStatus `protobuf:"bytes,9,opt,name=customer_managed_encryption,json=customerManagedEncryption,proto3" json:"customer_managed_encryption,omitempty"` } func (x *SecretVersion) Reset() { @@ -452,6 +495,20 @@ func (x *SecretVersion) GetClientSpecifiedPayloadChecksum() bool { return false } +func (x *SecretVersion) GetScheduledDestroyTime() *timestamppb.Timestamp { + if x != nil { + return x.ScheduledDestroyTime + } + return nil +} + +func (x *SecretVersion) GetCustomerManagedEncryption() *CustomerManagedEncryptionStatus { + if x != nil { + return x.CustomerManagedEncryption + } + return nil +} + // A policy that defines the replication and encryption configuration of data. type Replication struct { state protoimpl.MessageState @@ -762,8 +819,9 @@ type Topic struct { // Required. The resource name of the Pub/Sub topic that will be published to, // in the following format: `projects/*/topics/*`. For publication to succeed, - // the Secret Manager P4SA must have `pubsub.publisher` permissions on the - // topic. + // the Secret Manager service agent must have the `pubsub.topic.publish` + // permission on the topic. The Pub/Sub Publisher role + // (`roles/pubsub.publisher`) includes this permission. 
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } @@ -1330,14 +1388,14 @@ var file_google_cloud_secretmanager_v1_resources_proto_rawDesc = []byte{ 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x85, 0x08, 0x0a, 0x06, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa1, 0x0a, 0x0a, 0x06, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x54, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x42, 0x06, 0xe0, 0x41, 0x05, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x42, 0x06, 0xe0, 0x41, 0x05, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, @@ -1377,197 +1435,236 @@ var file_google_cloud_secretmanager_v1_resources_proto_rawDesc = []byte{ 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, - 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x1a, 0x41, 0x0a, 0x13, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x4d, 0xea, 0x41, 0x4a, 0x0a, 0x23, 0x73, 0x65, 0x63, - 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 
0x63, 0x72, 0x65, 0x74, - 0x12, 0x23, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x73, 0x65, - 0x63, 0x72, 0x65, 0x74, 0x7d, 0x42, 0x0c, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x22, 0x81, 0x05, 0x0a, 0x0d, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x40, - 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, - 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, - 0x12, 0x42, 0x0a, 0x0c, 0x64, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, - 0x54, 0x69, 0x6d, 0x65, 0x12, 0x4d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, - 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, - 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x12, 0x5f, 0x0a, 0x12, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, - 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x11, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x17, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x4e, 0x0a, - 0x21, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, - 0x64, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, - 0x75, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x1e, 0x63, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x64, 0x50, 0x61, - 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x22, 0x48, 0x0a, - 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, - 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, - 0x07, 0x45, 0x4e, 0x41, 0x42, 0x4c, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x49, - 0x53, 0x41, 0x42, 0x4c, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x53, 0x54, - 
0x52, 0x4f, 0x59, 0x45, 0x44, 0x10, 0x03, 0x3a, 0x6e, 0xea, 0x41, 0x6b, 0x0a, 0x2a, 0x73, 0x65, - 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x63, 0x72, 0x65, - 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x65, 0x63, - 0x72, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x7d, 0x2f, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x22, 0xf4, 0x04, 0x0a, 0x0b, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x54, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x6d, - 0x61, 0x74, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x69, 0x63, - 0x48, 0x00, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x12, 0x5b, 0x0a, - 0x0c, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, - 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, - 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x55, 0x73, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x48, 0x00, 0x52, 0x0b, 0x75, - 0x73, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x1a, 0x8a, 0x01, 0x0a, 0x09, 0x41, - 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x12, 0x7d, 0x0a, 0x1b, 0x63, 0x75, 0x73, 0x74, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4e, 0x0a, 0x13, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x5f, 0x74, 0x74, 0x6c, 0x18, + 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x11, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x73, 0x74, 0x72, 0x6f, 0x79, 0x54, 0x74, 0x6c, 0x12, 0x7d, 0x0a, 0x1b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x5f, 0x65, 0x6e, 0x63, - 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, + 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x19, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x45, 0x6e, 0x63, - 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x95, 0x02, 0x0a, 0x0b, 0x55, 0x73, 0x65, 0x72, - 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 
0x64, 0x12, 0x5f, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x1a, 0x41, 0x0a, 0x13, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x99, 0x01, 0xea, 0x41, 0x95, 0x01, 0x0a, 0x23, 0x73, 0x65, + 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x12, 0x23, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x73, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x7d, 0x12, 0x38, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, + 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x7d, + 0x2a, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x32, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x42, 0x0c, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0xd3, 0x07, 0x0a, 0x0d, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, + 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x0c, + 0x64, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, + 0xe0, 0x41, 0x03, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x54, 0x69, 0x6d, 0x65, + 0x12, 0x4d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 
0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x5f, 0x0a, 0x12, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x11, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x17, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x03, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x4e, 0x0a, 0x21, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x70, 0x61, + 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x1e, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x64, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, + 0x64, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x12, 0x55, 0x0a, 0x16, 0x73, 0x63, 0x68, + 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x14, 0x73, 0x63, 0x68, 0x65, + 0x64, 0x75, 0x6c, 0x65, 0x64, 0x44, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x54, 0x69, 0x6d, 0x65, + 0x12, 0x83, 0x01, 0x0a, 0x1b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x4d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x19, 0x63, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x45, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x45, 0x4e, 0x41, 0x42, 0x4c, 0x45, + 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x53, 0x41, 0x42, 0x4c, 0x45, 0x44, 0x10, + 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x53, 0x54, 0x52, 0x4f, 0x59, 0x45, 0x44, 0x10, 0x03, + 0x3a, 0xe2, 0x01, 0xea, 0x41, 0xde, 0x01, 0x0a, 0x2a, 0x73, 0x65, 0x63, 0x72, 0x65, 
0x74, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x2f, + 0x7b, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x7d, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x7d, 0x12, 0x52, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x73, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x7d, 0x2f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x2a, 0x0e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x0d, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xf4, 0x04, 0x0a, 0x0b, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x54, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, + 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x48, 0x00, + 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x12, 0x5b, 0x0a, 0x0c, 0x75, + 0x73, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, + 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x55, 0x73, + 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x48, 0x00, 0x52, 0x0b, 0x75, 0x73, 0x65, + 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x1a, 0x8a, 0x01, 0x0a, 0x09, 0x41, 0x75, 0x74, + 0x6f, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x12, 0x7d, 0x0a, 0x1b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x45, 0x6e, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x19, 0x63, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x45, 0x6e, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x95, 0x02, 0x0a, 0x0b, 0x55, 0x73, 0x65, 0x72, 0x4d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x64, 0x12, 0x5f, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x73, 0x18, 
0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x2e, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x72, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x1a, 0xa4, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7d, + 0x0a, 0x1b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x64, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, + 0x41, 0x01, 0x52, 0x19, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x64, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0d, 0x0a, + 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x42, 0x0a, 0x19, + 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x45, + 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x0c, 0x6b, 0x6d, 0x73, + 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x4e, 0x61, 0x6d, 0x65, + 0x22, 0xd0, 0x05, 0x0a, 0x11, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x60, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, + 0x74, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x64, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, - 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x1a, 0xa4, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x7d, 0x0a, 0x1b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, - 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x4d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x64, 0x45, 0x6e, 
0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, - 0x03, 0xe0, 0x41, 0x01, 0x52, 0x19, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x4d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x64, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, - 0x0d, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x42, - 0x0a, 0x19, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x64, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x0c, 0x6b, - 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x4e, 0x61, - 0x6d, 0x65, 0x22, 0xd0, 0x05, 0x0a, 0x11, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x60, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x6f, - 0x6d, 0x61, 0x74, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x6f, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x41, 0x75, 0x74, 0x6f, + 0x6d, 0x61, 0x74, 0x69, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x48, 0x00, 0x52, 0x09, 0x61, + 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x12, 0x67, 0x0a, 0x0c, 0x75, 0x73, 0x65, 0x72, + 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x42, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, + 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x48, 0x00, 0x52, 0x0b, 0x75, 0x73, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x64, 0x1a, 0x97, 0x01, 0x0a, 0x0f, 0x41, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x83, 0x01, 0x0a, 0x1b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, - 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x41, 0x75, - 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x48, 0x00, 0x52, - 0x09, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x12, 0x67, 0x0a, 0x0c, 0x75, 0x73, - 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x42, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, - 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, - 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x48, 0x00, 0x52, 0x0b, 0x75, 0x73, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x64, 0x1a, 0x97, 0x01, 0x0a, 0x0f, 0x41, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x69, - 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x83, 0x01, 0x0a, 
0x1b, 0x63, 0x75, 0x73, 0x74, + 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x45, 0x6e, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, + 0x52, 0x19, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x64, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xbf, 0x02, 0x0a, 0x11, + 0x55, 0x73, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x71, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x50, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x08, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x73, 0x1a, 0xb6, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x08, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x83, 0x01, 0x0a, 0x1b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x5f, 0x65, 0x6e, 0x63, - 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, + 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x19, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x64, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xbf, 0x02, - 0x0a, 0x11, 0x55, 0x73, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x71, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x50, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, - 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x08, 0x72, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x1a, 0xb6, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, - 0x74, 
0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, - 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x83, 0x01, 0x0a, 0x1b, 0x63, 0x75, - 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x5f, 0x65, - 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, - 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, - 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x45, - 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, - 0x03, 0xe0, 0x41, 0x03, 0x52, 0x19, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x4d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x64, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, - 0x14, 0x0a, 0x12, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x57, 0x0a, 0x1f, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, - 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x34, 0x0a, 0x14, 0x6b, 0x6d, 0x73, 0x5f, - 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x11, 0x6b, 0x6d, 0x73, - 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x65, - 0x0a, 0x05, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x3a, 0x43, 0xea, 0x41, 0x40, 0x0a, 0x1b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, - 0x69, 0x63, 0x12, 0x21, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x7b, 0x74, - 0x6f, 0x70, 0x69, 0x63, 0x7d, 0x22, 0xa2, 0x01, 0x0a, 0x08, 0x52, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x12, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x72, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, - 0x10, 0x6e, 0x65, 0x78, 0x74, 0x52, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, - 0x65, 0x12, 0x47, 0x0a, 0x0f, 0x72, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x65, - 0x72, 0x69, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x04, 0x52, 0x0e, 0x72, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x22, 0x5e, 0x0a, 0x0d, 0x53, 0x65, - 0x63, 0x72, 0x65, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x64, - 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, - 0x29, 0x0a, 0x0b, 0x64, 0x61, 
0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x64, 0x61, 0x74, - 0x61, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x64, - 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x42, 0xea, 0x01, 0x0a, 0x21, 0x63, - 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, - 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, - 0x42, 0x0e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x50, 0x01, 0x5a, 0x47, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x73, 0x65, 0x63, 0x72, - 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x70, 0x62, 0x3b, 0x73, 0x65, 0x63, 0x72, - 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, - 0x03, 0x47, 0x53, 0x4d, 0xaa, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, - 0x6f, 0x75, 0x64, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, - 0x6f, 0x75, 0x64, 0x5c, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x5c, 0x56, 0x31, 0xea, 0x02, 0x20, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, - 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x67, 0x65, 0x64, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x14, 0x0a, + 0x12, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x22, 0x57, 0x0a, 0x1f, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x4d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x34, 0x0a, 0x14, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, + 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x11, 0x6b, 0x6d, 0x73, 0x4b, 0x65, + 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x65, 0x0a, 0x05, + 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x43, + 0xea, 0x41, 0x40, 0x0a, 0x1b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, + 0x12, 0x21, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x7b, 0x74, 0x6f, 0x70, + 0x69, 0x63, 0x7d, 0x22, 0xa2, 0x01, 0x0a, 0x08, 0x52, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x4d, 0x0a, 0x12, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x72, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x6e, + 0x65, 0x78, 0x74, 0x52, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, + 0x47, 0x0a, 0x0f, 0x72, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x69, + 0x6f, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x04, 0x52, 0x0e, 0x72, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x22, 0x5e, 0x0a, 0x0d, 0x53, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x29, 0x0a, + 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, + 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x64, 0x61, 0x74, + 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x42, 0xea, 0x01, 0x0a, 0x21, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, + 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x42, 0x0e, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x47, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x70, 0x62, 0x3b, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, + 0x53, 0x4d, 0xaa, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, + 0x56, 0x31, 0xca, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, + 0x64, 0x5c, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x5c, + 0x56, 0x31, 0xea, 0x02, 0x20, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x3a, 0x3a, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1584,7 +1681,7 @@ func file_google_cloud_secretmanager_v1_resources_proto_rawDescGZIP() []byte { var file_google_cloud_secretmanager_v1_resources_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_google_cloud_secretmanager_v1_resources_proto_msgTypes = make([]protoimpl.MessageInfo, 18) -var file_google_cloud_secretmanager_v1_resources_proto_goTypes = []interface{}{ +var file_google_cloud_secretmanager_v1_resources_proto_goTypes = []any{ (SecretVersion_State)(0), // 0: google.cloud.secretmanager.v1.SecretVersion.State (*Secret)(nil), // 1: google.cloud.secretmanager.v1.Secret (*SecretVersion)(nil), // 2: google.cloud.secretmanager.v1.SecretVersion @@ -1617,27 +1714,31 @@ var file_google_cloud_secretmanager_v1_resources_proto_depIdxs = []int32{ 8, // 6: 
google.cloud.secretmanager.v1.Secret.rotation:type_name -> google.cloud.secretmanager.v1.Rotation 11, // 7: google.cloud.secretmanager.v1.Secret.version_aliases:type_name -> google.cloud.secretmanager.v1.Secret.VersionAliasesEntry 12, // 8: google.cloud.secretmanager.v1.Secret.annotations:type_name -> google.cloud.secretmanager.v1.Secret.AnnotationsEntry - 19, // 9: google.cloud.secretmanager.v1.SecretVersion.create_time:type_name -> google.protobuf.Timestamp - 19, // 10: google.cloud.secretmanager.v1.SecretVersion.destroy_time:type_name -> google.protobuf.Timestamp - 0, // 11: google.cloud.secretmanager.v1.SecretVersion.state:type_name -> google.cloud.secretmanager.v1.SecretVersion.State - 5, // 12: google.cloud.secretmanager.v1.SecretVersion.replication_status:type_name -> google.cloud.secretmanager.v1.ReplicationStatus - 13, // 13: google.cloud.secretmanager.v1.Replication.automatic:type_name -> google.cloud.secretmanager.v1.Replication.Automatic - 14, // 14: google.cloud.secretmanager.v1.Replication.user_managed:type_name -> google.cloud.secretmanager.v1.Replication.UserManaged - 16, // 15: google.cloud.secretmanager.v1.ReplicationStatus.automatic:type_name -> google.cloud.secretmanager.v1.ReplicationStatus.AutomaticStatus - 17, // 16: google.cloud.secretmanager.v1.ReplicationStatus.user_managed:type_name -> google.cloud.secretmanager.v1.ReplicationStatus.UserManagedStatus - 19, // 17: google.cloud.secretmanager.v1.Rotation.next_rotation_time:type_name -> google.protobuf.Timestamp - 20, // 18: google.cloud.secretmanager.v1.Rotation.rotation_period:type_name -> google.protobuf.Duration - 4, // 19: google.cloud.secretmanager.v1.Replication.Automatic.customer_managed_encryption:type_name -> google.cloud.secretmanager.v1.CustomerManagedEncryption - 15, // 20: google.cloud.secretmanager.v1.Replication.UserManaged.replicas:type_name -> google.cloud.secretmanager.v1.Replication.UserManaged.Replica - 4, // 21: google.cloud.secretmanager.v1.Replication.UserManaged.Replica.customer_managed_encryption:type_name -> google.cloud.secretmanager.v1.CustomerManagedEncryption - 6, // 22: google.cloud.secretmanager.v1.ReplicationStatus.AutomaticStatus.customer_managed_encryption:type_name -> google.cloud.secretmanager.v1.CustomerManagedEncryptionStatus - 18, // 23: google.cloud.secretmanager.v1.ReplicationStatus.UserManagedStatus.replicas:type_name -> google.cloud.secretmanager.v1.ReplicationStatus.UserManagedStatus.ReplicaStatus - 6, // 24: google.cloud.secretmanager.v1.ReplicationStatus.UserManagedStatus.ReplicaStatus.customer_managed_encryption:type_name -> google.cloud.secretmanager.v1.CustomerManagedEncryptionStatus - 25, // [25:25] is the sub-list for method output_type - 25, // [25:25] is the sub-list for method input_type - 25, // [25:25] is the sub-list for extension type_name - 25, // [25:25] is the sub-list for extension extendee - 0, // [0:25] is the sub-list for field type_name + 20, // 9: google.cloud.secretmanager.v1.Secret.version_destroy_ttl:type_name -> google.protobuf.Duration + 4, // 10: google.cloud.secretmanager.v1.Secret.customer_managed_encryption:type_name -> google.cloud.secretmanager.v1.CustomerManagedEncryption + 19, // 11: google.cloud.secretmanager.v1.SecretVersion.create_time:type_name -> google.protobuf.Timestamp + 19, // 12: google.cloud.secretmanager.v1.SecretVersion.destroy_time:type_name -> google.protobuf.Timestamp + 0, // 13: google.cloud.secretmanager.v1.SecretVersion.state:type_name -> google.cloud.secretmanager.v1.SecretVersion.State + 5, // 14: 
google.cloud.secretmanager.v1.SecretVersion.replication_status:type_name -> google.cloud.secretmanager.v1.ReplicationStatus + 19, // 15: google.cloud.secretmanager.v1.SecretVersion.scheduled_destroy_time:type_name -> google.protobuf.Timestamp + 6, // 16: google.cloud.secretmanager.v1.SecretVersion.customer_managed_encryption:type_name -> google.cloud.secretmanager.v1.CustomerManagedEncryptionStatus + 13, // 17: google.cloud.secretmanager.v1.Replication.automatic:type_name -> google.cloud.secretmanager.v1.Replication.Automatic + 14, // 18: google.cloud.secretmanager.v1.Replication.user_managed:type_name -> google.cloud.secretmanager.v1.Replication.UserManaged + 16, // 19: google.cloud.secretmanager.v1.ReplicationStatus.automatic:type_name -> google.cloud.secretmanager.v1.ReplicationStatus.AutomaticStatus + 17, // 20: google.cloud.secretmanager.v1.ReplicationStatus.user_managed:type_name -> google.cloud.secretmanager.v1.ReplicationStatus.UserManagedStatus + 19, // 21: google.cloud.secretmanager.v1.Rotation.next_rotation_time:type_name -> google.protobuf.Timestamp + 20, // 22: google.cloud.secretmanager.v1.Rotation.rotation_period:type_name -> google.protobuf.Duration + 4, // 23: google.cloud.secretmanager.v1.Replication.Automatic.customer_managed_encryption:type_name -> google.cloud.secretmanager.v1.CustomerManagedEncryption + 15, // 24: google.cloud.secretmanager.v1.Replication.UserManaged.replicas:type_name -> google.cloud.secretmanager.v1.Replication.UserManaged.Replica + 4, // 25: google.cloud.secretmanager.v1.Replication.UserManaged.Replica.customer_managed_encryption:type_name -> google.cloud.secretmanager.v1.CustomerManagedEncryption + 6, // 26: google.cloud.secretmanager.v1.ReplicationStatus.AutomaticStatus.customer_managed_encryption:type_name -> google.cloud.secretmanager.v1.CustomerManagedEncryptionStatus + 18, // 27: google.cloud.secretmanager.v1.ReplicationStatus.UserManagedStatus.replicas:type_name -> google.cloud.secretmanager.v1.ReplicationStatus.UserManagedStatus.ReplicaStatus + 6, // 28: google.cloud.secretmanager.v1.ReplicationStatus.UserManagedStatus.ReplicaStatus.customer_managed_encryption:type_name -> google.cloud.secretmanager.v1.CustomerManagedEncryptionStatus + 29, // [29:29] is the sub-list for method output_type + 29, // [29:29] is the sub-list for method input_type + 29, // [29:29] is the sub-list for extension type_name + 29, // [29:29] is the sub-list for extension extendee + 0, // [0:29] is the sub-list for field type_name } func init() { file_google_cloud_secretmanager_v1_resources_proto_init() } @@ -1646,7 +1747,7 @@ func file_google_cloud_secretmanager_v1_resources_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_cloud_secretmanager_v1_resources_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_secretmanager_v1_resources_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Secret); i { case 0: return &v.state @@ -1658,7 +1759,7 @@ func file_google_cloud_secretmanager_v1_resources_proto_init() { return nil } } - file_google_cloud_secretmanager_v1_resources_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_secretmanager_v1_resources_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*SecretVersion); i { case 0: return &v.state @@ -1670,7 +1771,7 @@ func file_google_cloud_secretmanager_v1_resources_proto_init() { return nil } } - file_google_cloud_secretmanager_v1_resources_proto_msgTypes[2].Exporter = func(v interface{}, i int) 
interface{} { + file_google_cloud_secretmanager_v1_resources_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*Replication); i { case 0: return &v.state @@ -1682,7 +1783,7 @@ func file_google_cloud_secretmanager_v1_resources_proto_init() { return nil } } - file_google_cloud_secretmanager_v1_resources_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_secretmanager_v1_resources_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*CustomerManagedEncryption); i { case 0: return &v.state @@ -1694,7 +1795,7 @@ func file_google_cloud_secretmanager_v1_resources_proto_init() { return nil } } - file_google_cloud_secretmanager_v1_resources_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_secretmanager_v1_resources_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*ReplicationStatus); i { case 0: return &v.state @@ -1706,7 +1807,7 @@ func file_google_cloud_secretmanager_v1_resources_proto_init() { return nil } } - file_google_cloud_secretmanager_v1_resources_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_secretmanager_v1_resources_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*CustomerManagedEncryptionStatus); i { case 0: return &v.state @@ -1718,7 +1819,7 @@ func file_google_cloud_secretmanager_v1_resources_proto_init() { return nil } } - file_google_cloud_secretmanager_v1_resources_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_secretmanager_v1_resources_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*Topic); i { case 0: return &v.state @@ -1730,7 +1831,7 @@ func file_google_cloud_secretmanager_v1_resources_proto_init() { return nil } } - file_google_cloud_secretmanager_v1_resources_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_secretmanager_v1_resources_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*Rotation); i { case 0: return &v.state @@ -1742,7 +1843,7 @@ func file_google_cloud_secretmanager_v1_resources_proto_init() { return nil } } - file_google_cloud_secretmanager_v1_resources_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_secretmanager_v1_resources_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*SecretPayload); i { case 0: return &v.state @@ -1754,7 +1855,7 @@ func file_google_cloud_secretmanager_v1_resources_proto_init() { return nil } } - file_google_cloud_secretmanager_v1_resources_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_secretmanager_v1_resources_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*Replication_Automatic); i { case 0: return &v.state @@ -1766,7 +1867,7 @@ func file_google_cloud_secretmanager_v1_resources_proto_init() { return nil } } - file_google_cloud_secretmanager_v1_resources_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_secretmanager_v1_resources_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*Replication_UserManaged); i { case 0: return &v.state @@ -1778,7 +1879,7 @@ func file_google_cloud_secretmanager_v1_resources_proto_init() { return nil } } - file_google_cloud_secretmanager_v1_resources_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_secretmanager_v1_resources_proto_msgTypes[14].Exporter = 
func(v any, i int) any { switch v := v.(*Replication_UserManaged_Replica); i { case 0: return &v.state @@ -1790,7 +1891,7 @@ func file_google_cloud_secretmanager_v1_resources_proto_init() { return nil } } - file_google_cloud_secretmanager_v1_resources_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_secretmanager_v1_resources_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v := v.(*ReplicationStatus_AutomaticStatus); i { case 0: return &v.state @@ -1802,7 +1903,7 @@ func file_google_cloud_secretmanager_v1_resources_proto_init() { return nil } } - file_google_cloud_secretmanager_v1_resources_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_secretmanager_v1_resources_proto_msgTypes[16].Exporter = func(v any, i int) any { switch v := v.(*ReplicationStatus_UserManagedStatus); i { case 0: return &v.state @@ -1814,7 +1915,7 @@ func file_google_cloud_secretmanager_v1_resources_proto_init() { return nil } } - file_google_cloud_secretmanager_v1_resources_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_secretmanager_v1_resources_proto_msgTypes[17].Exporter = func(v any, i int) any { switch v := v.(*ReplicationStatus_UserManagedStatus_ReplicaStatus); i { case 0: return &v.state @@ -1827,19 +1928,19 @@ func file_google_cloud_secretmanager_v1_resources_proto_init() { } } } - file_google_cloud_secretmanager_v1_resources_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_google_cloud_secretmanager_v1_resources_proto_msgTypes[0].OneofWrappers = []any{ (*Secret_ExpireTime)(nil), (*Secret_Ttl)(nil), } - file_google_cloud_secretmanager_v1_resources_proto_msgTypes[2].OneofWrappers = []interface{}{ + file_google_cloud_secretmanager_v1_resources_proto_msgTypes[2].OneofWrappers = []any{ (*Replication_Automatic_)(nil), (*Replication_UserManaged_)(nil), } - file_google_cloud_secretmanager_v1_resources_proto_msgTypes[4].OneofWrappers = []interface{}{ + file_google_cloud_secretmanager_v1_resources_proto_msgTypes[4].OneofWrappers = []any{ (*ReplicationStatus_Automatic)(nil), (*ReplicationStatus_UserManaged)(nil), } - file_google_cloud_secretmanager_v1_resources_proto_msgTypes[8].OneofWrappers = []interface{}{} + file_google_cloud_secretmanager_v1_resources_proto_msgTypes[8].OneofWrappers = []any{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/cloud.google.com/go/secretmanager/apiv1/secretmanagerpb/service.pb.go b/vendor/cloud.google.com/go/secretmanager/apiv1/secretmanagerpb/service.pb.go index 77ac90f0ae7..3650cc23c36 100644 --- a/vendor/cloud.google.com/go/secretmanager/apiv1/secretmanagerpb/service.pb.go +++ b/vendor/cloud.google.com/go/secretmanager/apiv1/secretmanagerpb/service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/cloud/secretmanager/v1/service.proto package secretmanagerpb @@ -43,14 +43,16 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// Request message for [SecretManagerService.ListSecrets][google.cloud.secretmanager.v1.SecretManagerService.ListSecrets]. 
+// Request message for +// [SecretManagerService.ListSecrets][google.cloud.secretmanager.v1.SecretManagerService.ListSecrets]. type ListSecretsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Required. The resource name of the project associated with the - // [Secrets][google.cloud.secretmanager.v1.Secret], in the format `projects/*`. + // [Secrets][google.cloud.secretmanager.v1.Secret], in the format `projects/*` + // or `projects/*/locations/*` Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` // Optional. The maximum number of results to be returned in a single page. If // set to 0, the server decides the number of results to return. If the @@ -127,19 +129,24 @@ func (x *ListSecretsRequest) GetFilter() string { return "" } -// Response message for [SecretManagerService.ListSecrets][google.cloud.secretmanager.v1.SecretManagerService.ListSecrets]. +// Response message for +// [SecretManagerService.ListSecrets][google.cloud.secretmanager.v1.SecretManagerService.ListSecrets]. type ListSecretsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The list of [Secrets][google.cloud.secretmanager.v1.Secret] sorted in reverse by create_time (newest - // first). + // The list of [Secrets][google.cloud.secretmanager.v1.Secret] sorted in + // reverse by create_time (newest first). Secrets []*Secret `protobuf:"bytes,1,rep,name=secrets,proto3" json:"secrets,omitempty"` // A token to retrieve the next page of results. Pass this value in - // [ListSecretsRequest.page_token][google.cloud.secretmanager.v1.ListSecretsRequest.page_token] to retrieve the next page. + // [ListSecretsRequest.page_token][google.cloud.secretmanager.v1.ListSecretsRequest.page_token] + // to retrieve the next page. NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` - // The total number of [Secrets][google.cloud.secretmanager.v1.Secret]. + // The total number of [Secrets][google.cloud.secretmanager.v1.Secret] but 0 + // when the + // [ListSecretsRequest.filter][google.cloud.secretmanager.v1.ListSecretsRequest.filter] + // field is set. TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` } @@ -196,14 +203,16 @@ func (x *ListSecretsResponse) GetTotalSize() int32 { return 0 } -// Request message for [SecretManagerService.CreateSecret][google.cloud.secretmanager.v1.SecretManagerService.CreateSecret]. +// Request message for +// [SecretManagerService.CreateSecret][google.cloud.secretmanager.v1.SecretManagerService.CreateSecret]. type CreateSecretRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Required. The resource name of the project to associate with the - // [Secret][google.cloud.secretmanager.v1.Secret], in the format `projects/*`. + // [Secret][google.cloud.secretmanager.v1.Secret], in the format `projects/*` + // or `projects/*/locations/*`. Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` // Required. This must be unique within the project. // @@ -211,7 +220,8 @@ type CreateSecretRequest struct { // contain uppercase and lowercase letters, numerals, and the hyphen (`-`) and // underscore (`_`) characters. SecretId string `protobuf:"bytes,2,opt,name=secret_id,json=secretId,proto3" json:"secret_id,omitempty"` - // Required. 
A [Secret][google.cloud.secretmanager.v1.Secret] with initial field values. + // Required. A [Secret][google.cloud.secretmanager.v1.Secret] with initial + // field values. Secret *Secret `protobuf:"bytes,3,opt,name=secret,proto3" json:"secret,omitempty"` } @@ -268,16 +278,20 @@ func (x *CreateSecretRequest) GetSecret() *Secret { return nil } -// Request message for [SecretManagerService.AddSecretVersion][google.cloud.secretmanager.v1.SecretManagerService.AddSecretVersion]. +// Request message for +// [SecretManagerService.AddSecretVersion][google.cloud.secretmanager.v1.SecretManagerService.AddSecretVersion]. type AddSecretVersionRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The resource name of the [Secret][google.cloud.secretmanager.v1.Secret] to associate with the - // [SecretVersion][google.cloud.secretmanager.v1.SecretVersion] in the format `projects/*/secrets/*`. + // Required. The resource name of the + // [Secret][google.cloud.secretmanager.v1.Secret] to associate with the + // [SecretVersion][google.cloud.secretmanager.v1.SecretVersion] in the format + // `projects/*/secrets/*` or `projects/*/locations/*/secrets/*`. Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // Required. The secret payload of the [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]. + // Required. The secret payload of the + // [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]. Payload *SecretPayload `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"` } @@ -327,13 +341,16 @@ func (x *AddSecretVersionRequest) GetPayload() *SecretPayload { return nil } -// Request message for [SecretManagerService.GetSecret][google.cloud.secretmanager.v1.SecretManagerService.GetSecret]. +// Request message for +// [SecretManagerService.GetSecret][google.cloud.secretmanager.v1.SecretManagerService.GetSecret]. type GetSecretRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The resource name of the [Secret][google.cloud.secretmanager.v1.Secret], in the format `projects/*/secrets/*`. + // Required. The resource name of the + // [Secret][google.cloud.secretmanager.v1.Secret], in the format + // `projects/*/secrets/*` or `projects/*/locations/*/secrets/*`. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } @@ -376,15 +393,17 @@ func (x *GetSecretRequest) GetName() string { return "" } -// Request message for [SecretManagerService.ListSecretVersions][google.cloud.secretmanager.v1.SecretManagerService.ListSecretVersions]. +// Request message for +// [SecretManagerService.ListSecretVersions][google.cloud.secretmanager.v1.SecretManagerService.ListSecretVersions]. type ListSecretVersionsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The resource name of the [Secret][google.cloud.secretmanager.v1.Secret] associated with the - // [SecretVersions][google.cloud.secretmanager.v1.SecretVersion] to list, in the format - // `projects/*/secrets/*`. + // Required. The resource name of the + // [Secret][google.cloud.secretmanager.v1.Secret] associated with the + // [SecretVersions][google.cloud.secretmanager.v1.SecretVersion] to list, in + // the format `projects/*/secrets/*` or `projects/*/locations/*/secrets/*`. Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` // Optional. 
The maximum number of results to be returned in a single page. If // set to 0, the server decides the number of results to return. If the @@ -461,19 +480,25 @@ func (x *ListSecretVersionsRequest) GetFilter() string { return "" } -// Response message for [SecretManagerService.ListSecretVersions][google.cloud.secretmanager.v1.SecretManagerService.ListSecretVersions]. +// Response message for +// [SecretManagerService.ListSecretVersions][google.cloud.secretmanager.v1.SecretManagerService.ListSecretVersions]. type ListSecretVersionsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The list of [SecretVersions][google.cloud.secretmanager.v1.SecretVersion] sorted in reverse by - // create_time (newest first). + // The list of [SecretVersions][google.cloud.secretmanager.v1.SecretVersion] + // sorted in reverse by create_time (newest first). Versions []*SecretVersion `protobuf:"bytes,1,rep,name=versions,proto3" json:"versions,omitempty"` // A token to retrieve the next page of results. Pass this value in - // [ListSecretVersionsRequest.page_token][google.cloud.secretmanager.v1.ListSecretVersionsRequest.page_token] to retrieve the next page. + // [ListSecretVersionsRequest.page_token][google.cloud.secretmanager.v1.ListSecretVersionsRequest.page_token] + // to retrieve the next page. NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` - // The total number of [SecretVersions][google.cloud.secretmanager.v1.SecretVersion]. + // The total number of + // [SecretVersions][google.cloud.secretmanager.v1.SecretVersion] but 0 when + // the + // [ListSecretsRequest.filter][google.cloud.secretmanager.v1.ListSecretsRequest.filter] + // field is set. TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` } @@ -530,17 +555,22 @@ func (x *ListSecretVersionsResponse) GetTotalSize() int32 { return 0 } -// Request message for [SecretManagerService.GetSecretVersion][google.cloud.secretmanager.v1.SecretManagerService.GetSecretVersion]. +// Request message for +// [SecretManagerService.GetSecretVersion][google.cloud.secretmanager.v1.SecretManagerService.GetSecretVersion]. type GetSecretVersionRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The resource name of the [SecretVersion][google.cloud.secretmanager.v1.SecretVersion] in the format - // `projects/*/secrets/*/versions/*`. + // Required. The resource name of the + // [SecretVersion][google.cloud.secretmanager.v1.SecretVersion] in the format + // `projects/*/secrets/*/versions/*` or + // `projects/*/locations/*/secrets/*/versions/*`. // - // `projects/*/secrets/*/versions/latest` is an alias to the most recently - // created [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]. + // `projects/*/secrets/*/versions/latest` or + // `projects/*/locations/*/secrets/*/versions/latest` is an alias to the most + // recently created + // [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } @@ -583,13 +613,15 @@ func (x *GetSecretVersionRequest) GetName() string { return "" } -// Request message for [SecretManagerService.UpdateSecret][google.cloud.secretmanager.v1.SecretManagerService.UpdateSecret]. 
+// Request message for +// [SecretManagerService.UpdateSecret][google.cloud.secretmanager.v1.SecretManagerService.UpdateSecret]. type UpdateSecretRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. [Secret][google.cloud.secretmanager.v1.Secret] with updated field values. + // Required. [Secret][google.cloud.secretmanager.v1.Secret] with updated field + // values. Secret *Secret `protobuf:"bytes,1,opt,name=secret,proto3" json:"secret,omitempty"` // Required. Specifies the fields to be updated. UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` @@ -641,17 +673,22 @@ func (x *UpdateSecretRequest) GetUpdateMask() *fieldmaskpb.FieldMask { return nil } -// Request message for [SecretManagerService.AccessSecretVersion][google.cloud.secretmanager.v1.SecretManagerService.AccessSecretVersion]. +// Request message for +// [SecretManagerService.AccessSecretVersion][google.cloud.secretmanager.v1.SecretManagerService.AccessSecretVersion]. type AccessSecretVersionRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The resource name of the [SecretVersion][google.cloud.secretmanager.v1.SecretVersion] in the format - // `projects/*/secrets/*/versions/*`. + // Required. The resource name of the + // [SecretVersion][google.cloud.secretmanager.v1.SecretVersion] in the format + // `projects/*/secrets/*/versions/*` or + // `projects/*/locations/*/secrets/*/versions/*`. // - // `projects/*/secrets/*/versions/latest` is an alias to the most recently - // created [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]. + // `projects/*/secrets/*/versions/latest` or + // `projects/*/locations/*/secrets/*/versions/latest` is an alias to the most + // recently created + // [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } @@ -694,14 +731,17 @@ func (x *AccessSecretVersionRequest) GetName() string { return "" } -// Response message for [SecretManagerService.AccessSecretVersion][google.cloud.secretmanager.v1.SecretManagerService.AccessSecretVersion]. +// Response message for +// [SecretManagerService.AccessSecretVersion][google.cloud.secretmanager.v1.SecretManagerService.AccessSecretVersion]. type AccessSecretVersionResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The resource name of the [SecretVersion][google.cloud.secretmanager.v1.SecretVersion] in the format - // `projects/*/secrets/*/versions/*`. + // The resource name of the + // [SecretVersion][google.cloud.secretmanager.v1.SecretVersion] in the format + // `projects/*/secrets/*/versions/*` or + // `projects/*/locations/*/secrets/*/versions/*`. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Secret payload Payload *SecretPayload `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"` @@ -753,18 +793,20 @@ func (x *AccessSecretVersionResponse) GetPayload() *SecretPayload { return nil } -// Request message for [SecretManagerService.DeleteSecret][google.cloud.secretmanager.v1.SecretManagerService.DeleteSecret]. +// Request message for +// [SecretManagerService.DeleteSecret][google.cloud.secretmanager.v1.SecretManagerService.DeleteSecret]. 
type DeleteSecretRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The resource name of the [Secret][google.cloud.secretmanager.v1.Secret] to delete in the format + // Required. The resource name of the + // [Secret][google.cloud.secretmanager.v1.Secret] to delete in the format // `projects/*/secrets/*`. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Optional. Etag of the [Secret][google.cloud.secretmanager.v1.Secret]. The request succeeds if it matches - // the etag of the currently stored secret object. If the etag is omitted, - // the request succeeds. + // Optional. Etag of the [Secret][google.cloud.secretmanager.v1.Secret]. The + // request succeeds if it matches the etag of the currently stored secret + // object. If the etag is omitted, the request succeeds. Etag string `protobuf:"bytes,2,opt,name=etag,proto3" json:"etag,omitempty"` } @@ -814,18 +856,22 @@ func (x *DeleteSecretRequest) GetEtag() string { return "" } -// Request message for [SecretManagerService.DisableSecretVersion][google.cloud.secretmanager.v1.SecretManagerService.DisableSecretVersion]. +// Request message for +// [SecretManagerService.DisableSecretVersion][google.cloud.secretmanager.v1.SecretManagerService.DisableSecretVersion]. type DisableSecretVersionRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The resource name of the [SecretVersion][google.cloud.secretmanager.v1.SecretVersion] to disable in the format - // `projects/*/secrets/*/versions/*`. + // Required. The resource name of the + // [SecretVersion][google.cloud.secretmanager.v1.SecretVersion] to disable in + // the format `projects/*/secrets/*/versions/*` or + // `projects/*/locations/*/secrets/*/versions/*`. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Optional. Etag of the [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]. The request succeeds if it matches - // the etag of the currently stored secret version object. If the etag is - // omitted, the request succeeds. + // Optional. Etag of the + // [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]. The request + // succeeds if it matches the etag of the currently stored secret version + // object. If the etag is omitted, the request succeeds. Etag string `protobuf:"bytes,2,opt,name=etag,proto3" json:"etag,omitempty"` } @@ -875,18 +921,22 @@ func (x *DisableSecretVersionRequest) GetEtag() string { return "" } -// Request message for [SecretManagerService.EnableSecretVersion][google.cloud.secretmanager.v1.SecretManagerService.EnableSecretVersion]. +// Request message for +// [SecretManagerService.EnableSecretVersion][google.cloud.secretmanager.v1.SecretManagerService.EnableSecretVersion]. type EnableSecretVersionRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The resource name of the [SecretVersion][google.cloud.secretmanager.v1.SecretVersion] to enable in the format - // `projects/*/secrets/*/versions/*`. + // Required. The resource name of the + // [SecretVersion][google.cloud.secretmanager.v1.SecretVersion] to enable in + // the format `projects/*/secrets/*/versions/*` or + // `projects/*/locations/*/secrets/*/versions/*`. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Optional. 
Etag of the [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]. The request succeeds if it matches - // the etag of the currently stored secret version object. If the etag is - // omitted, the request succeeds. + // Optional. Etag of the + // [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]. The request + // succeeds if it matches the etag of the currently stored secret version + // object. If the etag is omitted, the request succeeds. Etag string `protobuf:"bytes,2,opt,name=etag,proto3" json:"etag,omitempty"` } @@ -936,18 +986,22 @@ func (x *EnableSecretVersionRequest) GetEtag() string { return "" } -// Request message for [SecretManagerService.DestroySecretVersion][google.cloud.secretmanager.v1.SecretManagerService.DestroySecretVersion]. +// Request message for +// [SecretManagerService.DestroySecretVersion][google.cloud.secretmanager.v1.SecretManagerService.DestroySecretVersion]. type DestroySecretVersionRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The resource name of the [SecretVersion][google.cloud.secretmanager.v1.SecretVersion] to destroy in the format - // `projects/*/secrets/*/versions/*`. + // Required. The resource name of the + // [SecretVersion][google.cloud.secretmanager.v1.SecretVersion] to destroy in + // the format `projects/*/secrets/*/versions/*` or + // `projects/*/locations/*/secrets/*/versions/*`. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Optional. Etag of the [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]. The request succeeds if it matches - // the etag of the currently stored secret version object. If the etag is - // omitted, the request succeeds. + // Optional. Etag of the + // [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]. The request + // succeeds if it matches the etag of the currently stored secret version + // object. If the etag is omitted, the request succeeds. 
Etag string `protobuf:"bytes,2,opt,name=etag,proto3" json:"etag,omitempty"` } @@ -1022,35 +1076,34 @@ var file_google_cloud_secretmanager_v1_service_proto_rawDesc = []byte{ 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, - 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc4, 0x01, 0x0a, 0x12, 0x4c, 0x69, + 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbc, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x4b, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x20, 0x0a, - 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, - 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, - 0x6b, 0x65, 0x6e, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x22, 0x9d, 0x01, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x72, - 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, - 0x52, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, - 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, - 0x22, 0xc8, 0x01, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x63, 0x72, 0x65, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, - 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x06, 0x70, + 0x12, 0x43, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x2b, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x25, 0x12, 
0x23, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x06, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, + 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, + 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1b, 0x0a, 0x06, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x9d, 0x01, 0x0a, 0x13, 0x4c, 0x69, 0x73, + 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x3f, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, + 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x74, + 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x74, + 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x22, 0xc0, 0x01, 0x0a, 0x13, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x43, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x2b, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x25, 0x12, 0x23, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x49, 0x64, 0x12, 0x42, 0x0a, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, @@ -1161,122 +1214,158 @@ var file_google_cloud_secretmanager_v1_service_proto_rawDesc = []byte{ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, - 0x41, 0x01, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x32, 0xcf, 0x15, 0x0a, 0x14, 0x53, 0x65, 0x63, + 0x41, 0x01, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x32, 0xe8, 0x1c, 0x0a, 0x14, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x12, 0xa6, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x65, 0x12, 0xd5, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 
0x73, 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x30, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12, 0x1f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x2a, 0x7d, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x12, 0xb4, 0x01, 0x0a, 0x0c, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x32, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, - 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, - 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, - 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x22, 0x49, 0xda, 0x41, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x2c, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x73, 0x65, 0x63, 0x72, - 0x65, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x3a, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, - 0x22, 0x1f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, - 0x73, 0x12, 0xc2, 0x01, 0x0a, 0x10, 0x41, 0x64, 0x64, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, 0x64, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5f, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x50, 0x5a, 0x2d, 0x12, 0x2b, 0x2f, 0x76, 0x31, + 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, + 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x12, 0x1f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, + 0x7d, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x12, 0xec, 0x01, 0x0a, 0x0c, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x53, 0x65, 0x63, 0x72, 0x65, 
0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, - 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x48, 0xda, 0x41, - 0x0e, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x31, 0x3a, 0x01, 0x2a, 0x22, 0x2c, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, - 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x61, 0x64, 0x64, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x93, 0x01, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x65, - 0x63, 0x72, 0x65, 0x74, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, - 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, - 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x22, 0x2e, 0xda, 0x41, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12, 0x1f, 0x2f, 0x76, 0x31, - 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x2a, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xb6, 0x01, 0x0a, - 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x32, 0x2e, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x22, 0x80, 0x01, 0xda, 0x41, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x2c, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x73, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x60, 0x3a, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x5a, 0x35, 0x3a, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x22, 0x2b, 0x2f, 0x76, 0x31, 0x2f, + 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, + 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x22, 0x1f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, + 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x12, 0x82, 0x02, 0x0a, 0x10, 0x41, 0x64, 0x64, + 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, - 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, - 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, - 0x31, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x22, 0x4b, 0xda, 0x41, 0x12, 0x73, 0x65, 0x63, - 0x72, 0x65, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x30, 0x3a, 0x06, 0x73, 0x65, 0x63, 0x72, 
0x65, 0x74, 0x32, 0x26, 0x2f, - 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, - 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x65, 0x63, - 0x72, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x22, 0x2e, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x21, 0x2a, 0x1f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, + 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, + 0x64, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x22, 0x87, 0x01, 0xda, 0x41, 0x0e, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, + 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x70, 0x3a, 0x01, 0x2a, + 0x5a, 0x3d, 0x3a, 0x01, 0x2a, 0x22, 0x38, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x61, 0x64, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, + 0x2c, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x2f, - 0x2a, 0x7d, 0x12, 0xc6, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, - 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, - 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, - 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, - 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3b, - 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x12, - 0x2a, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, + 0x2a, 0x7d, 0x3a, 0x61, 0x64, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0xc2, 0x01, + 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x2f, 0x2e, 0x67, 
0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x53, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x63, + 0x72, 0x65, 0x74, 0x22, 0x5d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x50, 0x5a, 0x2d, 0x12, 0x2b, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x2f, 0x2a, + 0x7d, 0x12, 0x1f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x2f, - 0x2a, 0x7d, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0xb3, 0x01, 0x0a, 0x10, - 0x47, 0x65, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, - 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, - 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x39, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, - 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x63, 0x72, - 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, - 0x7d, 0x12, 0xce, 0x01, 0x0a, 0x13, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x53, 0x65, 0x63, 0x72, - 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, - 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, - 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x40, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x33, 0x12, - 0x31, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x2f, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x61, 0x63, 0x63, 0x65, - 0x73, 0x73, 0x12, 
0xc6, 0x01, 0x0a, 0x14, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x65, - 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3a, 0x2e, 0x67, 0x6f, + 0x2a, 0x7d, 0x12, 0xf5, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x63, + 0x72, 0x65, 0x74, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x22, 0x89, + 0x01, 0xda, 0x41, 0x12, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x6e, 0x3a, 0x06, 0x73, 0x65, + 0x63, 0x72, 0x65, 0x74, 0x5a, 0x3c, 0x3a, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x32, 0x32, + 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, + 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x2f, + 0x2a, 0x7d, 0x32, 0x26, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x2e, + 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xb9, 0x01, 0x0a, 0x0c, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, - 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x61, - 0x62, 0x6c, 0x65, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x5d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x50, 0x5a, 0x2d, 0x2a, 0x2b, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, + 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2a, 0x1f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, + 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x80, 0x02, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x53, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x38, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, + 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 
0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x44, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, - 0xe4, 0x93, 0x02, 0x37, 0x3a, 0x01, 0x2a, 0x22, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, + 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x75, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x66, 0x5a, 0x38, 0x12, 0x36, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2a, 0x2f, + 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, + 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0xed, 0x01, 0x0a, 0x10, 0x47, 0x65, + 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, + 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x47, + 0x65, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x73, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x66, 0x5a, 0x38, 0x12, 0x36, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, + 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x2a, 0x2f, + 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x90, 0x02, 0x0a, 0x13, 0x41, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, + 0x31, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 
0x2e, 0x73, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x81, 0x01, 0xda, 0x41, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x74, 0x5a, 0x3f, 0x12, 0x3d, 0x2f, 0x76, 0x31, 0x2f, + 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, + 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x63, + 0x72, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x2a, 0x7d, 0x3a, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x31, 0x2f, 0x76, 0x31, 0x2f, 0x7b, + 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x8c, 0x02, 0x0a, + 0x14, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x65, 0x63, + 0x72, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, + 0x89, 0x01, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x7c, 0x3a, + 0x01, 0x2a, 0x5a, 0x43, 0x3a, 0x01, 0x2a, 0x22, 0x3e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, + 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, + 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x22, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x2f, 0x2a, 0x7d, 0x3a, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x12, 0xc3, 0x01, 0x0a, 0x13, + 0x2f, 0x2a, 0x7d, 0x3a, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x88, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, @@ -1284,72 +1373,94 @@ var file_google_cloud_secretmanager_v1_service_proto_rawDesc = []byte{ 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, - 0x65, 0x63, 
0x72, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x43, 0xda, 0x41, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x36, 0x3a, 0x01, 0x2a, 0x22, 0x31, - 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x65, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x12, 0xc6, 0x01, 0x0a, 0x14, 0x44, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x53, 0x65, 0x63, - 0x72, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x74, 0x72, - 0x6f, 0x79, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x44, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x37, 0x3a, 0x01, 0x2a, 0x22, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, - 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x63, - 0x72, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, - 0x2a, 0x7d, 0x3a, 0x64, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x12, 0x86, 0x01, 0x0a, 0x0c, 0x53, - 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, - 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x3b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x35, 0x3a, 0x01, - 0x2a, 0x22, 0x30, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x63, 0x72, - 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x12, 0x83, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, - 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, - 0x38, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x87, 0x01, 0xda, + 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x7a, 0x3a, 0x01, 0x2a, 0x5a, + 0x42, 0x3a, 0x01, 0x2a, 0x22, 0x3d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 
0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x2f, 0x2a, + 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x65, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x22, 0x31, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, + 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x8c, 0x02, 0x0a, 0x14, 0x44, 0x65, 0x73, 0x74, 0x72, + 0x6f, 0x79, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, + 0x44, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x89, 0x01, 0xda, 0x41, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x7c, 0x3a, 0x01, 0x2a, 0x5a, 0x43, 0x3a, 0x01, + 0x2a, 0x22, 0x3e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x64, 0x65, 0x73, 0x74, 0x72, 0x6f, + 0x79, 0x22, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x64, 0x65, + 0x73, 0x74, 0x72, 0x6f, 0x79, 0x12, 0xc9, 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x22, 0x7e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x78, 0x3a, 0x01, 0x2a, 0x5a, 0x41, 0x3a, 0x01, + 0x2a, 0x22, 0x3c, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x2f, + 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, + 0x30, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x12, 0xc3, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 
0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, + 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x78, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x72, 0x5a, 0x3e, 0x12, 0x3c, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x2a, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, - 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0xac, 0x01, 0x0a, 0x12, 0x54, 0x65, - 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, - 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, - 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3b, 0x3a, 0x01, 0x2a, - 0x22, 0x36, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, - 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, - 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x50, 0xca, 0x41, 0x1c, 0x73, 0x65, 0x63, - 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x2e, 0x68, 0x74, 0x74, 0x70, - 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, - 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0xe8, 0x01, 0x0a, 0x21, 0x63, - 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, - 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, - 0x42, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x47, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x70, 0x62, 0x3b, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, - 0x53, 0x4d, 0xaa, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, - 0x64, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, - 0x56, 0x31, 0xca, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, - 0x64, 0x5c, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x5c, - 0x56, 0x31, 0xea, 0x02, 0x20, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 
0x6c, 0x6f, - 0x75, 0x64, 0x3a, 0x3a, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, + 0x63, 0x72, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x30, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, 0x61, + 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0xf7, 0x01, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, + 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, + 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, + 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x8b, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x84, 0x01, 0x3a, 0x01, 0x2a, + 0x5a, 0x47, 0x3a, 0x01, 0x2a, 0x22, 0x42, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, + 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x36, 0x2f, 0x76, 0x31, 0x2f, 0x7b, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x74, + 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x1a, 0x50, 0xca, 0x41, 0x1c, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0xd2, 0x41, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, + 0x6f, 0x72, 0x6d, 0x42, 0xe8, 0x01, 0x0a, 0x21, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x42, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x47, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x73, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, + 0x76, 0x31, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x70, 0x62, 0x3b, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x70, 0x62, 0xf8, 0x01, 0x01, 
0xa2, 0x02, 0x03, 0x47, 0x53, 0x4d, 0xaa, 0x02, 0x1d, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x1d, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x53, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x5c, 0x56, 0x31, 0xea, 0x02, 0x20, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x53, 0x65, 0x63, + 0x72, 0x65, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1365,7 +1476,7 @@ func file_google_cloud_secretmanager_v1_service_proto_rawDescGZIP() []byte { } var file_google_cloud_secretmanager_v1_service_proto_msgTypes = make([]protoimpl.MessageInfo, 15) -var file_google_cloud_secretmanager_v1_service_proto_goTypes = []interface{}{ +var file_google_cloud_secretmanager_v1_service_proto_goTypes = []any{ (*ListSecretsRequest)(nil), // 0: google.cloud.secretmanager.v1.ListSecretsRequest (*ListSecretsResponse)(nil), // 1: google.cloud.secretmanager.v1.ListSecretsResponse (*CreateSecretRequest)(nil), // 2: google.cloud.secretmanager.v1.CreateSecretRequest @@ -1444,7 +1555,7 @@ func file_google_cloud_secretmanager_v1_service_proto_init() { } file_google_cloud_secretmanager_v1_resources_proto_init() if !protoimpl.UnsafeEnabled { - file_google_cloud_secretmanager_v1_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_secretmanager_v1_service_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*ListSecretsRequest); i { case 0: return &v.state @@ -1456,7 +1567,7 @@ func file_google_cloud_secretmanager_v1_service_proto_init() { return nil } } - file_google_cloud_secretmanager_v1_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_secretmanager_v1_service_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ListSecretsResponse); i { case 0: return &v.state @@ -1468,7 +1579,7 @@ func file_google_cloud_secretmanager_v1_service_proto_init() { return nil } } - file_google_cloud_secretmanager_v1_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_secretmanager_v1_service_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*CreateSecretRequest); i { case 0: return &v.state @@ -1480,7 +1591,7 @@ func file_google_cloud_secretmanager_v1_service_proto_init() { return nil } } - file_google_cloud_secretmanager_v1_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_secretmanager_v1_service_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*AddSecretVersionRequest); i { case 0: return &v.state @@ -1492,7 +1603,7 @@ func file_google_cloud_secretmanager_v1_service_proto_init() { return nil } } - file_google_cloud_secretmanager_v1_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_secretmanager_v1_service_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*GetSecretRequest); i { case 0: return &v.state @@ -1504,7 +1615,7 @@ func file_google_cloud_secretmanager_v1_service_proto_init() { return nil } } - file_google_cloud_secretmanager_v1_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + 
file_google_cloud_secretmanager_v1_service_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*ListSecretVersionsRequest); i { case 0: return &v.state @@ -1516,7 +1627,7 @@ func file_google_cloud_secretmanager_v1_service_proto_init() { return nil } } - file_google_cloud_secretmanager_v1_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_secretmanager_v1_service_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*ListSecretVersionsResponse); i { case 0: return &v.state @@ -1528,7 +1639,7 @@ func file_google_cloud_secretmanager_v1_service_proto_init() { return nil } } - file_google_cloud_secretmanager_v1_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_secretmanager_v1_service_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*GetSecretVersionRequest); i { case 0: return &v.state @@ -1540,7 +1651,7 @@ func file_google_cloud_secretmanager_v1_service_proto_init() { return nil } } - file_google_cloud_secretmanager_v1_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_secretmanager_v1_service_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*UpdateSecretRequest); i { case 0: return &v.state @@ -1552,7 +1663,7 @@ func file_google_cloud_secretmanager_v1_service_proto_init() { return nil } } - file_google_cloud_secretmanager_v1_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_secretmanager_v1_service_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*AccessSecretVersionRequest); i { case 0: return &v.state @@ -1564,7 +1675,7 @@ func file_google_cloud_secretmanager_v1_service_proto_init() { return nil } } - file_google_cloud_secretmanager_v1_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_secretmanager_v1_service_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*AccessSecretVersionResponse); i { case 0: return &v.state @@ -1576,7 +1687,7 @@ func file_google_cloud_secretmanager_v1_service_proto_init() { return nil } } - file_google_cloud_secretmanager_v1_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_secretmanager_v1_service_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*DeleteSecretRequest); i { case 0: return &v.state @@ -1588,7 +1699,7 @@ func file_google_cloud_secretmanager_v1_service_proto_init() { return nil } } - file_google_cloud_secretmanager_v1_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_secretmanager_v1_service_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*DisableSecretVersionRequest); i { case 0: return &v.state @@ -1600,7 +1711,7 @@ func file_google_cloud_secretmanager_v1_service_proto_init() { return nil } } - file_google_cloud_secretmanager_v1_service_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_secretmanager_v1_service_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*EnableSecretVersionRequest); i { case 0: return &v.state @@ -1612,7 +1723,7 @@ func file_google_cloud_secretmanager_v1_service_proto_init() { return nil } } - file_google_cloud_secretmanager_v1_service_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_secretmanager_v1_service_proto_msgTypes[14].Exporter = 
func(v any, i int) any { switch v := v.(*DestroySecretVersionRequest); i { case 0: return &v.state @@ -1659,51 +1770,61 @@ const _ = grpc.SupportPackageIsVersion6 type SecretManagerServiceClient interface { // Lists [Secrets][google.cloud.secretmanager.v1.Secret]. ListSecrets(ctx context.Context, in *ListSecretsRequest, opts ...grpc.CallOption) (*ListSecretsResponse, error) - // Creates a new [Secret][google.cloud.secretmanager.v1.Secret] containing no [SecretVersions][google.cloud.secretmanager.v1.SecretVersion]. + // Creates a new [Secret][google.cloud.secretmanager.v1.Secret] containing no + // [SecretVersions][google.cloud.secretmanager.v1.SecretVersion]. CreateSecret(ctx context.Context, in *CreateSecretRequest, opts ...grpc.CallOption) (*Secret, error) - // Creates a new [SecretVersion][google.cloud.secretmanager.v1.SecretVersion] containing secret data and attaches - // it to an existing [Secret][google.cloud.secretmanager.v1.Secret]. + // Creates a new [SecretVersion][google.cloud.secretmanager.v1.SecretVersion] + // containing secret data and attaches it to an existing + // [Secret][google.cloud.secretmanager.v1.Secret]. AddSecretVersion(ctx context.Context, in *AddSecretVersionRequest, opts ...grpc.CallOption) (*SecretVersion, error) // Gets metadata for a given [Secret][google.cloud.secretmanager.v1.Secret]. GetSecret(ctx context.Context, in *GetSecretRequest, opts ...grpc.CallOption) (*Secret, error) - // Updates metadata of an existing [Secret][google.cloud.secretmanager.v1.Secret]. + // Updates metadata of an existing + // [Secret][google.cloud.secretmanager.v1.Secret]. UpdateSecret(ctx context.Context, in *UpdateSecretRequest, opts ...grpc.CallOption) (*Secret, error) // Deletes a [Secret][google.cloud.secretmanager.v1.Secret]. DeleteSecret(ctx context.Context, in *DeleteSecretRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - // Lists [SecretVersions][google.cloud.secretmanager.v1.SecretVersion]. This call does not return secret - // data. + // Lists [SecretVersions][google.cloud.secretmanager.v1.SecretVersion]. This + // call does not return secret data. ListSecretVersions(ctx context.Context, in *ListSecretVersionsRequest, opts ...grpc.CallOption) (*ListSecretVersionsResponse, error) - // Gets metadata for a [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]. + // Gets metadata for a + // [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]. // // `projects/*/secrets/*/versions/latest` is an alias to the most recently // created [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]. GetSecretVersion(ctx context.Context, in *GetSecretVersionRequest, opts ...grpc.CallOption) (*SecretVersion, error) - // Accesses a [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]. This call returns the secret data. + // Accesses a [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]. + // This call returns the secret data. // // `projects/*/secrets/*/versions/latest` is an alias to the most recently // created [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]. AccessSecretVersion(ctx context.Context, in *AccessSecretVersionRequest, opts ...grpc.CallOption) (*AccessSecretVersionResponse, error) // Disables a [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]. 
// - // Sets the [state][google.cloud.secretmanager.v1.SecretVersion.state] of the [SecretVersion][google.cloud.secretmanager.v1.SecretVersion] to + // Sets the [state][google.cloud.secretmanager.v1.SecretVersion.state] of the + // [SecretVersion][google.cloud.secretmanager.v1.SecretVersion] to // [DISABLED][google.cloud.secretmanager.v1.SecretVersion.State.DISABLED]. DisableSecretVersion(ctx context.Context, in *DisableSecretVersionRequest, opts ...grpc.CallOption) (*SecretVersion, error) // Enables a [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]. // - // Sets the [state][google.cloud.secretmanager.v1.SecretVersion.state] of the [SecretVersion][google.cloud.secretmanager.v1.SecretVersion] to + // Sets the [state][google.cloud.secretmanager.v1.SecretVersion.state] of the + // [SecretVersion][google.cloud.secretmanager.v1.SecretVersion] to // [ENABLED][google.cloud.secretmanager.v1.SecretVersion.State.ENABLED]. EnableSecretVersion(ctx context.Context, in *EnableSecretVersionRequest, opts ...grpc.CallOption) (*SecretVersion, error) // Destroys a [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]. // - // Sets the [state][google.cloud.secretmanager.v1.SecretVersion.state] of the [SecretVersion][google.cloud.secretmanager.v1.SecretVersion] to - // [DESTROYED][google.cloud.secretmanager.v1.SecretVersion.State.DESTROYED] and irrevocably destroys the - // secret data. + // Sets the [state][google.cloud.secretmanager.v1.SecretVersion.state] of the + // [SecretVersion][google.cloud.secretmanager.v1.SecretVersion] to + // [DESTROYED][google.cloud.secretmanager.v1.SecretVersion.State.DESTROYED] + // and irrevocably destroys the secret data. DestroySecretVersion(ctx context.Context, in *DestroySecretVersionRequest, opts ...grpc.CallOption) (*SecretVersion, error) // Sets the access control policy on the specified secret. Replaces any // existing policy. // - // Permissions on [SecretVersions][google.cloud.secretmanager.v1.SecretVersion] are enforced according - // to the policy set on the associated [Secret][google.cloud.secretmanager.v1.Secret]. + // Permissions on + // [SecretVersions][google.cloud.secretmanager.v1.SecretVersion] are enforced + // according to the policy set on the associated + // [Secret][google.cloud.secretmanager.v1.Secret]. SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) // Gets the access control policy for a secret. // Returns empty policy if the secret exists and does not have a policy set. @@ -1865,51 +1986,61 @@ func (c *secretManagerServiceClient) TestIamPermissions(ctx context.Context, in type SecretManagerServiceServer interface { // Lists [Secrets][google.cloud.secretmanager.v1.Secret]. ListSecrets(context.Context, *ListSecretsRequest) (*ListSecretsResponse, error) - // Creates a new [Secret][google.cloud.secretmanager.v1.Secret] containing no [SecretVersions][google.cloud.secretmanager.v1.SecretVersion]. + // Creates a new [Secret][google.cloud.secretmanager.v1.Secret] containing no + // [SecretVersions][google.cloud.secretmanager.v1.SecretVersion]. CreateSecret(context.Context, *CreateSecretRequest) (*Secret, error) - // Creates a new [SecretVersion][google.cloud.secretmanager.v1.SecretVersion] containing secret data and attaches - // it to an existing [Secret][google.cloud.secretmanager.v1.Secret]. 
+ // Creates a new [SecretVersion][google.cloud.secretmanager.v1.SecretVersion] + // containing secret data and attaches it to an existing + // [Secret][google.cloud.secretmanager.v1.Secret]. AddSecretVersion(context.Context, *AddSecretVersionRequest) (*SecretVersion, error) // Gets metadata for a given [Secret][google.cloud.secretmanager.v1.Secret]. GetSecret(context.Context, *GetSecretRequest) (*Secret, error) - // Updates metadata of an existing [Secret][google.cloud.secretmanager.v1.Secret]. + // Updates metadata of an existing + // [Secret][google.cloud.secretmanager.v1.Secret]. UpdateSecret(context.Context, *UpdateSecretRequest) (*Secret, error) // Deletes a [Secret][google.cloud.secretmanager.v1.Secret]. DeleteSecret(context.Context, *DeleteSecretRequest) (*emptypb.Empty, error) - // Lists [SecretVersions][google.cloud.secretmanager.v1.SecretVersion]. This call does not return secret - // data. + // Lists [SecretVersions][google.cloud.secretmanager.v1.SecretVersion]. This + // call does not return secret data. ListSecretVersions(context.Context, *ListSecretVersionsRequest) (*ListSecretVersionsResponse, error) - // Gets metadata for a [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]. + // Gets metadata for a + // [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]. // // `projects/*/secrets/*/versions/latest` is an alias to the most recently // created [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]. GetSecretVersion(context.Context, *GetSecretVersionRequest) (*SecretVersion, error) - // Accesses a [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]. This call returns the secret data. + // Accesses a [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]. + // This call returns the secret data. // // `projects/*/secrets/*/versions/latest` is an alias to the most recently // created [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]. AccessSecretVersion(context.Context, *AccessSecretVersionRequest) (*AccessSecretVersionResponse, error) // Disables a [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]. // - // Sets the [state][google.cloud.secretmanager.v1.SecretVersion.state] of the [SecretVersion][google.cloud.secretmanager.v1.SecretVersion] to + // Sets the [state][google.cloud.secretmanager.v1.SecretVersion.state] of the + // [SecretVersion][google.cloud.secretmanager.v1.SecretVersion] to // [DISABLED][google.cloud.secretmanager.v1.SecretVersion.State.DISABLED]. DisableSecretVersion(context.Context, *DisableSecretVersionRequest) (*SecretVersion, error) // Enables a [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]. // - // Sets the [state][google.cloud.secretmanager.v1.SecretVersion.state] of the [SecretVersion][google.cloud.secretmanager.v1.SecretVersion] to + // Sets the [state][google.cloud.secretmanager.v1.SecretVersion.state] of the + // [SecretVersion][google.cloud.secretmanager.v1.SecretVersion] to // [ENABLED][google.cloud.secretmanager.v1.SecretVersion.State.ENABLED]. EnableSecretVersion(context.Context, *EnableSecretVersionRequest) (*SecretVersion, error) // Destroys a [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]. // - // Sets the [state][google.cloud.secretmanager.v1.SecretVersion.state] of the [SecretVersion][google.cloud.secretmanager.v1.SecretVersion] to - // [DESTROYED][google.cloud.secretmanager.v1.SecretVersion.State.DESTROYED] and irrevocably destroys the - // secret data. 
+ // Sets the [state][google.cloud.secretmanager.v1.SecretVersion.state] of the + // [SecretVersion][google.cloud.secretmanager.v1.SecretVersion] to + // [DESTROYED][google.cloud.secretmanager.v1.SecretVersion.State.DESTROYED] + // and irrevocably destroys the secret data. DestroySecretVersion(context.Context, *DestroySecretVersionRequest) (*SecretVersion, error) // Sets the access control policy on the specified secret. Replaces any // existing policy. // - // Permissions on [SecretVersions][google.cloud.secretmanager.v1.SecretVersion] are enforced according - // to the policy set on the associated [Secret][google.cloud.secretmanager.v1.Secret]. + // Permissions on + // [SecretVersions][google.cloud.secretmanager.v1.SecretVersion] are enforced + // according to the policy set on the associated + // [Secret][google.cloud.secretmanager.v1.Secret]. SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error) // Gets the access control policy for a secret. // Returns empty policy if the secret exists and does not have a policy set. diff --git a/vendor/cloud.google.com/go/secretmanager/internal/version.go b/vendor/cloud.google.com/go/secretmanager/internal/version.go index 4518b5fb407..c7e60e4edbc 100644 --- a/vendor/cloud.google.com/go/secretmanager/internal/version.go +++ b/vendor/cloud.google.com/go/secretmanager/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.12.0" +const Version = "1.13.5" diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md index 625ad4fbe7e..e9fb55585b9 100644 --- a/vendor/cloud.google.com/go/storage/CHANGES.md +++ b/vendor/cloud.google.com/go/storage/CHANGES.md @@ -1,6 +1,70 @@ # Changes +## [1.43.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.42.0...storage/v1.43.0) (2024-07-03) + + +### Features + +* **storage/transfermanager:** Add DownloadDirectory ([#10430](https://github.com/googleapis/google-cloud-go/issues/10430)) ([0d0e5dd](https://github.com/googleapis/google-cloud-go/commit/0d0e5dd5214769cc2c197991c2ece1303bd600de)) +* **storage/transfermanager:** Automatically shard downloads ([#10379](https://github.com/googleapis/google-cloud-go/issues/10379)) ([05816f9](https://github.com/googleapis/google-cloud-go/commit/05816f9fafd3132c371da37f3a879bb9e8e7e604)) + + +### Bug Fixes + +* **storage/transfermanager:** WaitAndClose waits for Callbacks to finish ([#10504](https://github.com/googleapis/google-cloud-go/issues/10504)) ([0e81002](https://github.com/googleapis/google-cloud-go/commit/0e81002b3a5e560c874d814d28a35a102311d9ef)), refs [#10502](https://github.com/googleapis/google-cloud-go/issues/10502) +* **storage:** Allow empty soft delete on Create ([#10394](https://github.com/googleapis/google-cloud-go/issues/10394)) ([d8bd2c1](https://github.com/googleapis/google-cloud-go/commit/d8bd2c1ffc4f27503a74ded438d8bfbdd7707c63)), refs [#10380](https://github.com/googleapis/google-cloud-go/issues/10380) +* **storage:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b)) +* **storage:** Retry broken pipe error ([#10374](https://github.com/googleapis/google-cloud-go/issues/10374)) ([2f4daa1](https://github.com/googleapis/google-cloud-go/commit/2f4daa11acf9d3f260fa888333090359c4d9198e)), refs [#9178](https://github.com/googleapis/google-cloud-go/issues/9178) + + +### Documentation + +* **storage/control:** 
Remove allowlist note from Folders RPCs ([d6c543c](https://github.com/googleapis/google-cloud-go/commit/d6c543c3969016c63e158a862fc173dff60fb8d9)) + +## [1.42.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.41.0...storage/v1.42.0) (2024-06-10) + + +### Features + +* **storage:** Add new package transfermanager. This package is intended for parallel uploads and downloads, and is in preview. It is not stable, and is likely to change. ([#10045](https://github.com/googleapis/google-cloud-go/issues/10045)) ([cde5cbb](https://github.com/googleapis/google-cloud-go/commit/cde5cbba3145d5a702683656a42158621234fe71)) +* **storage:** Add bucket HierarchicalNamespace ([#10315](https://github.com/googleapis/google-cloud-go/issues/10315)) ([b92406c](https://github.com/googleapis/google-cloud-go/commit/b92406ccfadfdcee379e86d6f78c901d772401a9)), refs [#10146](https://github.com/googleapis/google-cloud-go/issues/10146) +* **storage:** Add BucketName to BucketHandle ([#10127](https://github.com/googleapis/google-cloud-go/issues/10127)) ([203cc59](https://github.com/googleapis/google-cloud-go/commit/203cc599e5e2f2f821dc75b47c5a4c9073333f05)) + + +### Bug Fixes + +* **storage:** Set invocation headers on xml reads ([#10250](https://github.com/googleapis/google-cloud-go/issues/10250)) ([c87e1ab](https://github.com/googleapis/google-cloud-go/commit/c87e1ab6f9618b8b3f4d0005ac159abd87b0daaf)) + + +### Documentation + +* **storage:** Update autoclass doc ([#10135](https://github.com/googleapis/google-cloud-go/issues/10135)) ([e4b2737](https://github.com/googleapis/google-cloud-go/commit/e4b2737ddc16d3bf8139a6def7326ac905f62acd)) + +## [1.41.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.40.0...storage/v1.41.0) (2024-05-13) + + +### Features + +* **storage/control:** Make Managed Folders operations public ([264a6dc](https://github.com/googleapis/google-cloud-go/commit/264a6dcddbffaec987dce1dc00f6550c263d2df7)) +* **storage:** Support for soft delete policies and restore ([#9520](https://github.com/googleapis/google-cloud-go/issues/9520)) ([985deb2](https://github.com/googleapis/google-cloud-go/commit/985deb2bdd1c79944cdd960bd3fbfa38cbfa1c91)) + + +### Bug Fixes + +* **storage/control:** An existing resource pattern value `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder=**}` to resource definition `storage.googleapis.com/ManagedFolder` is removed ([3e25053](https://github.com/googleapis/google-cloud-go/commit/3e250530567ee81ed4f51a3856c5940dbec35289)) +* **storage:** Add internaloption.WithDefaultEndpointTemplate ([3b41408](https://github.com/googleapis/google-cloud-go/commit/3b414084450a5764a0248756e95e13383a645f90)) +* **storage:** Bump x/net to v0.24.0 ([ba31ed5](https://github.com/googleapis/google-cloud-go/commit/ba31ed5fda2c9664f2e1cf972469295e63deb5b4)) +* **storage:** Disable gax retries for gRPC ([#9747](https://github.com/googleapis/google-cloud-go/issues/9747)) ([bbfc0ac](https://github.com/googleapis/google-cloud-go/commit/bbfc0acc272f21bf1f558ea23648183d5a11cda5)) +* **storage:** More strongly match regex ([#9706](https://github.com/googleapis/google-cloud-go/issues/9706)) ([3cfc8eb](https://github.com/googleapis/google-cloud-go/commit/3cfc8eb418e064d734bf3d8708162062dbbe988f)), refs [#9705](https://github.com/googleapis/google-cloud-go/issues/9705) +* **storage:** Retry net.OpError on connection reset ([#10154](https://github.com/googleapis/google-cloud-go/issues/10154)) 
([54fab10](https://github.com/googleapis/google-cloud-go/commit/54fab107f98b4f79c9df2959a05b981be0a613c1)), refs [#9478](https://github.com/googleapis/google-cloud-go/issues/9478) +* **storage:** Wrap error when MaxAttempts is hit ([#9767](https://github.com/googleapis/google-cloud-go/issues/9767)) ([9cb262b](https://github.com/googleapis/google-cloud-go/commit/9cb262bb65a162665bfb8bed0022615131bae1f2)), refs [#9720](https://github.com/googleapis/google-cloud-go/issues/9720) + + +### Documentation + +* **storage/control:** Update storage control documentation and add PHP for publishing ([1d757c6](https://github.com/googleapis/google-cloud-go/commit/1d757c66478963d6cbbef13fee939632c742759c)) + ## [1.40.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.39.1...storage/v1.40.0) (2024-03-29) diff --git a/vendor/cloud.google.com/go/storage/acl.go b/vendor/cloud.google.com/go/storage/acl.go index 74799e55e98..560a5605d0b 100644 --- a/vendor/cloud.google.com/go/storage/acl.go +++ b/vendor/cloud.google.com/go/storage/acl.go @@ -16,8 +16,6 @@ package storage import ( "context" - "net/http" - "reflect" "cloud.google.com/go/internal/trace" "cloud.google.com/go/storage/internal/apiv2/storagepb" @@ -162,15 +160,6 @@ func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error { return a.c.tc.DeleteObjectACL(ctx, a.bucket, a.object, entity, opts...) } -func (a *ACLHandle) configureCall(ctx context.Context, call interface{ Header() http.Header }) { - vc := reflect.ValueOf(call) - vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)}) - if a.userProject != "" { - vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(a.userProject)}) - } - setClientHeader(call.Header()) -} - func toObjectACLRules(items []*raw.ObjectAccessControl) []ACLRule { var rs []ACLRule for _, item := range items { diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go index 0344ef9de31..d582a60d0e8 100644 --- a/vendor/cloud.google.com/go/storage/bucket.go +++ b/vendor/cloud.google.com/go/storage/bucket.go @@ -116,6 +116,11 @@ func (b *BucketHandle) DefaultObjectACL() *ACLHandle { return &b.defaultObjectACL } +// BucketName returns the name of the bucket. +func (b *BucketHandle) BucketName() string { + return b.name +} + // Object returns an ObjectHandle, which provides operations on the named object. // This call does not perform any network operations such as fetching the object or verifying its existence. // Use methods on ObjectHandle to perform network operations. @@ -479,6 +484,20 @@ type BucketAttrs struct { // cannot be modified once the bucket is created. // ObjectRetention cannot be configured or reported through the gRPC API. ObjectRetentionMode string + + // SoftDeletePolicy contains the bucket's soft delete policy, which defines + // the period of time that soft-deleted objects will be retained, and cannot + // be permanently deleted. By default, new buckets will be created with a + // 7 day retention duration. In order to fully disable soft delete, you need + // to set a policy with a RetentionDuration of 0. + SoftDeletePolicy *SoftDeletePolicy + + // HierarchicalNamespace contains the bucket's hierarchical namespace + // configuration. Hierarchical namespace enabled buckets can contain + // [cloud.google.com/go/storage/control/apiv2/controlpb.Folder] resources. + // It cannot be modified after bucket creation time. + // UniformBucketLevelAccess must also also be enabled on the bucket. 
+ HierarchicalNamespace *HierarchicalNamespace } // BucketPolicyOnly is an alias for UniformBucketLevelAccess. @@ -760,12 +779,35 @@ type Autoclass struct { // TerminalStorageClass: The storage class that objects in the bucket // eventually transition to if they are not read for a certain length of // time. Valid values are NEARLINE and ARCHIVE. + // To modify TerminalStorageClass, Enabled must be set to true. TerminalStorageClass string // TerminalStorageClassUpdateTime represents the time of the most recent // update to "TerminalStorageClass". TerminalStorageClassUpdateTime time.Time } +// SoftDeletePolicy contains the bucket's soft delete policy, which defines the +// period of time that soft-deleted objects will be retained, and cannot be +// permanently deleted. +type SoftDeletePolicy struct { + // EffectiveTime indicates the time from which the policy, or one with a + // greater retention, was effective. This field is read-only. + EffectiveTime time.Time + + // RetentionDuration is the amount of time that soft-deleted objects in the + // bucket will be retained and cannot be permanently deleted. + RetentionDuration time.Duration +} + +// HierarchicalNamespace contains the bucket's hierarchical namespace +// configuration. Hierarchical namespace enabled buckets can contain +// [cloud.google.com/go/storage/control/apiv2/controlpb.Folder] resources. +type HierarchicalNamespace struct { + // Enabled indicates whether hierarchical namespace features are enabled on + // the bucket. This can only be set at bucket creation time currently. + Enabled bool +} + func newBucket(b *raw.Bucket) (*BucketAttrs, error) { if b == nil { return nil, nil @@ -803,6 +845,8 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) { RPO: toRPO(b), CustomPlacementConfig: customPlacementFromRaw(b.CustomPlacementConfig), Autoclass: toAutoclassFromRaw(b.Autoclass), + SoftDeletePolicy: toSoftDeletePolicyFromRaw(b.SoftDeletePolicy), + HierarchicalNamespace: toHierarchicalNamespaceFromRaw(b.HierarchicalNamespace), }, nil } @@ -836,6 +880,8 @@ func newBucketFromProto(b *storagepb.Bucket) *BucketAttrs { CustomPlacementConfig: customPlacementFromProto(b.GetCustomPlacementConfig()), ProjectNumber: parseProjectNumber(b.GetProject()), // this can return 0 the project resource name is ID based Autoclass: toAutoclassFromProto(b.GetAutoclass()), + SoftDeletePolicy: toSoftDeletePolicyFromProto(b.SoftDeletePolicy), + HierarchicalNamespace: toHierarchicalNamespaceFromProto(b.HierarchicalNamespace), } } @@ -891,6 +937,8 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket { Rpo: b.RPO.String(), CustomPlacementConfig: b.CustomPlacementConfig.toRawCustomPlacement(), Autoclass: b.Autoclass.toRawAutoclass(), + SoftDeletePolicy: b.SoftDeletePolicy.toRawSoftDeletePolicy(), + HierarchicalNamespace: b.HierarchicalNamespace.toRawHierarchicalNamespace(), } } @@ -951,6 +999,8 @@ func (b *BucketAttrs) toProtoBucket() *storagepb.Bucket { Rpo: b.RPO.String(), CustomPlacementConfig: b.CustomPlacementConfig.toProtoCustomPlacement(), Autoclass: b.Autoclass.toProtoAutoclass(), + SoftDeletePolicy: b.SoftDeletePolicy.toProtoSoftDeletePolicy(), + HierarchicalNamespace: b.HierarchicalNamespace.toProtoHierarchicalNamespace(), } } @@ -1032,6 +1082,7 @@ func (ua *BucketAttrsToUpdate) toProtoBucket() *storagepb.Bucket { IamConfig: bktIAM, Rpo: ua.RPO.String(), Autoclass: ua.Autoclass.toProtoAutoclass(), + SoftDeletePolicy: ua.SoftDeletePolicy.toProtoSoftDeletePolicy(), Labels: ua.setLabels, } } @@ -1149,9 +1200,15 @@ type BucketAttrsToUpdate struct { RPO RPO 
// If set, updates the autoclass configuration of the bucket. + // To disable autoclass on the bucket, set to an empty &Autoclass{}. + // To update the configuration for Autoclass.TerminalStorageClass, + // Autoclass.Enabled must also be set to true. // See https://cloud.google.com/storage/docs/using-autoclass for more information. Autoclass *Autoclass + // If set, updates the soft delete policy of the bucket. + SoftDeletePolicy *SoftDeletePolicy + // acl is the list of access control rules on the bucket. // It is unexported and only used internally by the gRPC client. // Library users should use ACLHandle methods directly. @@ -1273,6 +1330,14 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket { } rb.ForceSendFields = append(rb.ForceSendFields, "Autoclass") } + if ua.SoftDeletePolicy != nil { + if ua.SoftDeletePolicy.RetentionDuration == 0 { + rb.NullFields = append(rb.NullFields, "SoftDeletePolicy") + rb.SoftDeletePolicy = nil + } else { + rb.SoftDeletePolicy = ua.SoftDeletePolicy.toRawSoftDeletePolicy() + } + } if ua.PredefinedACL != "" { // Clear ACL or the call will fail. rb.Acl = nil @@ -2053,6 +2118,92 @@ func toAutoclassFromProto(a *storagepb.Bucket_Autoclass) *Autoclass { } } +func (p *SoftDeletePolicy) toRawSoftDeletePolicy() *raw.BucketSoftDeletePolicy { + if p == nil { + return nil + } + // Excluding read only field EffectiveTime. + // ForceSendFields must be set to send a zero value for RetentionDuration and disable + // soft delete. + return &raw.BucketSoftDeletePolicy{ + RetentionDurationSeconds: int64(p.RetentionDuration.Seconds()), + ForceSendFields: []string{"RetentionDurationSeconds"}, + } +} + +func (p *SoftDeletePolicy) toProtoSoftDeletePolicy() *storagepb.Bucket_SoftDeletePolicy { + if p == nil { + return nil + } + // Excluding read only field EffectiveTime. + return &storagepb.Bucket_SoftDeletePolicy{ + RetentionDuration: durationpb.New(p.RetentionDuration), + } +} + +func toSoftDeletePolicyFromRaw(p *raw.BucketSoftDeletePolicy) *SoftDeletePolicy { + if p == nil { + return nil + } + + policy := &SoftDeletePolicy{ + RetentionDuration: time.Duration(p.RetentionDurationSeconds) * time.Second, + } + + // Return EffectiveTime only if parsed to a valid value. + if t, err := time.Parse(time.RFC3339, p.EffectiveTime); err == nil { + policy.EffectiveTime = t + } + + return policy +} + +func toSoftDeletePolicyFromProto(p *storagepb.Bucket_SoftDeletePolicy) *SoftDeletePolicy { + if p == nil { + return nil + } + return &SoftDeletePolicy{ + EffectiveTime: p.GetEffectiveTime().AsTime(), + RetentionDuration: p.GetRetentionDuration().AsDuration(), + } +} + +func (hns *HierarchicalNamespace) toProtoHierarchicalNamespace() *storagepb.Bucket_HierarchicalNamespace { + if hns == nil { + return nil + } + return &storagepb.Bucket_HierarchicalNamespace{ + Enabled: hns.Enabled, + } +} + +func (hns *HierarchicalNamespace) toRawHierarchicalNamespace() *raw.BucketHierarchicalNamespace { + if hns == nil { + return nil + } + return &raw.BucketHierarchicalNamespace{ + Enabled: hns.Enabled, + } +} + +func toHierarchicalNamespaceFromProto(p *storagepb.Bucket_HierarchicalNamespace) *HierarchicalNamespace { + if p == nil { + return nil + } + return &HierarchicalNamespace{ + Enabled: p.Enabled, + } +} + +func toHierarchicalNamespaceFromRaw(r *raw.BucketHierarchicalNamespace) *HierarchicalNamespace { + if r == nil { + return nil + } + return &HierarchicalNamespace{ + Enabled: r.Enabled, + } +} + // Objects returns an iterator over the objects in the bucket that match the // Query q. 
If q is nil, no filtering is done. Objects will be iterated over // lexicographically by name. diff --git a/vendor/cloud.google.com/go/storage/client.go b/vendor/cloud.google.com/go/storage/client.go index 70b2a280e3b..bbe89276a43 100644 --- a/vendor/cloud.google.com/go/storage/client.go +++ b/vendor/cloud.google.com/go/storage/client.go @@ -59,8 +59,9 @@ type storageClient interface { // Object metadata methods. DeleteObject(ctx context.Context, bucket, object string, gen int64, conds *Conditions, opts ...storageOption) error - GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) + GetObject(ctx context.Context, params *getObjectParams, opts ...storageOption) (*ObjectAttrs, error) UpdateObject(ctx context.Context, params *updateObjectParams, opts ...storageOption) (*ObjectAttrs, error) + RestoreObject(ctx context.Context, params *restoreObjectParams, opts ...storageOption) (*ObjectAttrs, error) // Default Object ACL methods. @@ -182,16 +183,6 @@ type storageOption interface { Apply(s *settings) } -func withGAXOptions(opts ...gax.CallOption) storageOption { - return &gaxOption{opts} -} - -type gaxOption struct { - opts []gax.CallOption -} - -func (o *gaxOption) Apply(s *settings) { s.gax = o.opts } - func withRetryConfig(rc *retryConfig) storageOption { return &retryOption{rc} } @@ -294,6 +285,14 @@ type newRangeReaderParams struct { readCompressed bool // Use accept-encoding: gzip. Only works for HTTP currently. } +type getObjectParams struct { + bucket, object string + gen int64 + encryptionKey []byte + conds *Conditions + softDeleted bool +} + type updateObjectParams struct { bucket, object string uattrs *ObjectAttrsToUpdate @@ -303,6 +302,14 @@ type updateObjectParams struct { overrideRetention *bool } +type restoreObjectParams struct { + bucket, object string + gen int64 + encryptionKey []byte + conds *Conditions + copySourceACL bool +} + type composeObjectRequest struct { dstBucket string dstObject destinationObject diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go index b23cebcb836..c274c762ea4 100644 --- a/vendor/cloud.google.com/go/storage/doc.go +++ b/vendor/cloud.google.com/go/storage/doc.go @@ -350,7 +350,7 @@ To create a client which will use gRPC, use the alternate constructor: // Use client as usual. If the application is running within GCP, users may get better performance by -enabling Google Direct Access (enabling requests to skip some proxy steps). To enable, +enabling Direct Google Access (enabling requests to skip some proxy steps). To enable, set the environment variable `GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS=true` and add the following side-effect imports to your application: @@ -359,6 +359,13 @@ the following side-effect imports to your application: _ "google.golang.org/grpc/xds/googledirectpath" ) +# Storage Control API + +Certain control plane and long-running operations for Cloud Storage (including Folder +and Managed Folder operations) are supported via the autogenerated Storage Control +client, which is available as a subpackage in this module. See package docs at +[cloud.google.com/go/storage/control/apiv2] or reference the [Storage Control API] docs. 
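
Editor's note on the bucket surface added in the hunks above: the new SoftDeletePolicy and HierarchicalNamespace fields, the BucketName accessor, and the gRPC constructor mentioned in the doc.go change can be exercised roughly as sketched below. This is an illustrative example against the public cloud.google.com/go/storage API at the vendored version; the bucket and project names are placeholders, and anything not visible in this diff (for example storage.NewGRPCClient) is assumed from the library's documented surface rather than defined by this patch.

```go
package main

import (
	"context"
	"log"
	"time"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()

	// NewGRPCClient is the alternate constructor referenced in doc.go; with
	// GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS=true and the side-effect imports in
	// place, requests can use Direct Google Access.
	client, err := storage.NewGRPCClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	bkt := client.Bucket("example-bucket") // placeholder bucket name

	// Create a bucket with a 14-day soft delete retention and hierarchical
	// namespace enabled. HNS can only be set at creation time and requires
	// UniformBucketLevelAccess, per the field docs added in this diff.
	attrs := &storage.BucketAttrs{
		SoftDeletePolicy:         &storage.SoftDeletePolicy{RetentionDuration: 14 * 24 * time.Hour},
		HierarchicalNamespace:    &storage.HierarchicalNamespace{Enabled: true},
		UniformBucketLevelAccess: storage.UniformBucketLevelAccess{Enabled: true},
	}
	if err := bkt.Create(ctx, "example-project", attrs); err != nil { // placeholder project ID
		log.Fatal(err)
	}

	// Fully disable soft delete later by updating the policy to a zero
	// RetentionDuration, as described in the BucketAttrsToUpdate docs above.
	if _, err := bkt.Update(ctx, storage.BucketAttrsToUpdate{
		SoftDeletePolicy: &storage.SoftDeletePolicy{RetentionDuration: 0},
	}); err != nil {
		log.Fatal(err)
	}

	log.Println("bucket name:", bkt.BucketName())
}
```

Folder and Managed Folder operations on hierarchical-namespace buckets go through the separate Storage Control client (cloud.google.com/go/storage/control/apiv2) referenced in the doc.go addition above; they are not part of this package's surface.
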
+ [Cloud Storage IAM docs]: https://cloud.google.com/storage/docs/access-control/iam [XML POST Object docs]: https://cloud.google.com/storage/docs/xml-api/post-object [Cloud Storage retry docs]: https://cloud.google.com/storage/docs/retry-strategy @@ -367,5 +374,6 @@ the following side-effect imports to your application: [impersonation enabled]: https://cloud.google.com/sdk/gcloud/reference#--impersonate-service-account [IAM Service Account Credentials API]: https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview [custom audit logging]: https://cloud.google.com/storage/docs/audit-logging#add-custom-metadata +[Storage Control API]: https://cloud.google.com/storage/docs/reference/rpc/google.storage.control.v2 */ package storage // import "cloud.google.com/go/storage" diff --git a/vendor/cloud.google.com/go/storage/grpc_client.go b/vendor/cloud.google.com/go/storage/grpc_client.go index e337213f03f..d81a17b6b04 100644 --- a/vendor/cloud.google.com/go/storage/grpc_client.go +++ b/vendor/cloud.google.com/go/storage/grpc_client.go @@ -28,7 +28,6 @@ import ( "cloud.google.com/go/internal/trace" gapic "cloud.google.com/go/storage/internal/apiv2" "cloud.google.com/go/storage/internal/apiv2/storagepb" - "github.com/golang/protobuf/proto" "github.com/googleapis/gax-go/v2" "google.golang.org/api/googleapi" "google.golang.org/api/iterator" @@ -40,6 +39,7 @@ import ( "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/proto" fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" ) @@ -116,6 +116,8 @@ type grpcStorageClient struct { func newGRPCStorageClient(ctx context.Context, opts ...storageOption) (storageClient, error) { s := initSettings(opts...) s.clientOption = append(defaultGRPCOptions(), s.clientOption...) + // Disable all gax-level retries in favor of retry logic in the veneer client. + s.gax = append(s.gax, gax.WithRetry(nil)) config := newStorageConfig(s.clientOption...) if config.readAPIWasSet { @@ -365,6 +367,9 @@ func (c *grpcStorageClient) UpdateBucket(ctx context.Context, bucket string, uat if uattrs.Autoclass != nil { fieldMask.Paths = append(fieldMask.Paths, "autoclass") } + if uattrs.SoftDeletePolicy != nil { + fieldMask.Paths = append(fieldMask.Paths, "soft_delete_policy") + } for label := range uattrs.setLabels { fieldMask.Paths = append(fieldMask.Paths, fmt.Sprintf("labels.%s", label)) @@ -377,6 +382,13 @@ func (c *grpcStorageClient) UpdateBucket(ctx context.Context, bucket string, uat req.UpdateMask = fieldMask + if len(fieldMask.Paths) < 1 { + // Nothing to update. Send a get request for current attrs instead. This + // maintains consistency with JSON bucket updates. + opts = append(opts, idempotent(true)) + return c.GetBucket(ctx, bucket, conds, opts...) + } + var battrs *BucketAttrs err := run(ctx, func(ctx context.Context) error { res, err := c.raw.UpdateBucket(ctx, req, s.gax...) 
@@ -419,6 +431,7 @@ func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Q IncludeTrailingDelimiter: it.query.IncludeTrailingDelimiter, MatchGlob: it.query.MatchGlob, ReadMask: q.toFieldMask(), // a nil Query still results in a "*" FieldMask + SoftDeleted: it.query.SoftDeleted, } if s.userProject != "" { ctx = setUserProjectMetadata(ctx, s.userProject) @@ -488,22 +501,25 @@ func (c *grpcStorageClient) DeleteObject(ctx context.Context, bucket, object str return err } -func (c *grpcStorageClient) GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) { +func (c *grpcStorageClient) GetObject(ctx context.Context, params *getObjectParams, opts ...storageOption) (*ObjectAttrs, error) { s := callSettings(c.settings, opts...) req := &storagepb.GetObjectRequest{ - Bucket: bucketResourceName(globalProjectAlias, bucket), - Object: object, + Bucket: bucketResourceName(globalProjectAlias, params.bucket), + Object: params.object, // ProjectionFull by default. ReadMask: &fieldmaskpb.FieldMask{Paths: []string{"*"}}, } - if err := applyCondsProto("grpcStorageClient.GetObject", gen, conds, req); err != nil { + if err := applyCondsProto("grpcStorageClient.GetObject", params.gen, params.conds, req); err != nil { return nil, err } if s.userProject != "" { ctx = setUserProjectMetadata(ctx, s.userProject) } - if encryptionKey != nil { - req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(encryptionKey) + if params.encryptionKey != nil { + req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(params.encryptionKey) + } + if params.softDeleted { + req.SoftDeleted = ¶ms.softDeleted } var attrs *ObjectAttrs @@ -593,6 +609,17 @@ func (c *grpcStorageClient) UpdateObject(ctx context.Context, params *updateObje req.UpdateMask = fieldMask + if len(fieldMask.Paths) < 1 { + // Nothing to update. To maintain consistency with JSON, we must still + // update the object because metageneration and other fields are + // updated even on an empty update. + // gRPC will fail if the fieldmask is empty, so instead we add an + // output-only field to the update mask. Output-only fields are (and must + // be - see AIP 161) ignored, but allow us to send an empty update because + // any mask that is valid for read (as this one is) must be valid for write. + fieldMask.Paths = append(fieldMask.Paths, "create_time") + } + var attrs *ObjectAttrs err := run(ctx, func(ctx context.Context) error { res, err := c.raw.UpdateObject(ctx, req, s.gax...) @@ -606,6 +633,32 @@ func (c *grpcStorageClient) UpdateObject(ctx context.Context, params *updateObje return attrs, err } +func (c *grpcStorageClient) RestoreObject(ctx context.Context, params *restoreObjectParams, opts ...storageOption) (*ObjectAttrs, error) { + s := callSettings(c.settings, opts...) + req := &storagepb.RestoreObjectRequest{ + Bucket: bucketResourceName(globalProjectAlias, params.bucket), + Object: params.object, + CopySourceAcl: ¶ms.copySourceACL, + } + if err := applyCondsProto("grpcStorageClient.RestoreObject", params.gen, params.conds, req); err != nil { + return nil, err + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + + var attrs *ObjectAttrs + err := run(ctx, func(ctx context.Context) error { + res, err := c.raw.RestoreObject(ctx, req, s.gax...) 
+ attrs = newObjectFromProto(res) + return err + }, s.retry, s.idempotent) + if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { + return nil, ErrObjectNotExist + } + return attrs, err +} + // Default Object ACL methods. func (c *grpcStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error { @@ -735,7 +788,7 @@ func (c *grpcStorageClient) UpdateBucketACL(ctx context.Context, bucket string, func (c *grpcStorageClient) DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error { // There is no separate API for PATCH in gRPC. // Make a GET call first to retrieve ObjectAttrs. - attrs, err := c.GetObject(ctx, bucket, object, defaultGen, nil, nil, opts...) + attrs, err := c.GetObject(ctx, &getObjectParams{bucket, object, defaultGen, nil, nil, false}, opts...) if err != nil { return err } @@ -768,7 +821,7 @@ func (c *grpcStorageClient) DeleteObjectACL(ctx context.Context, bucket, object // ListObjectACLs retrieves object ACL entries. By default, it operates on the latest generation of this object. // Selecting a specific generation of this object is not currently supported by the client. func (c *grpcStorageClient) ListObjectACLs(ctx context.Context, bucket, object string, opts ...storageOption) ([]ACLRule, error) { - o, err := c.GetObject(ctx, bucket, object, defaultGen, nil, nil, opts...) + o, err := c.GetObject(ctx, &getObjectParams{bucket, object, defaultGen, nil, nil, false}, opts...) if err != nil { return nil, err } @@ -778,7 +831,7 @@ func (c *grpcStorageClient) ListObjectACLs(ctx context.Context, bucket, object s func (c *grpcStorageClient) UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) error { // There is no separate API for PATCH in gRPC. // Make a GET call first to retrieve ObjectAttrs. - attrs, err := c.GetObject(ctx, bucket, object, defaultGen, nil, nil, opts...) + attrs, err := c.GetObject(ctx, &getObjectParams{bucket, object, defaultGen, nil, nil, false}, opts...) if err != nil { return err } diff --git a/vendor/cloud.google.com/go/storage/hmac.go b/vendor/cloud.google.com/go/storage/hmac.go index 1b9fbe9dd20..f7811a5d140 100644 --- a/vendor/cloud.google.com/go/storage/hmac.go +++ b/vendor/cloud.google.com/go/storage/hmac.go @@ -272,7 +272,6 @@ func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string, // TODO: Remove fetch method upon integration. This method is internalized into // httpStorageClient.ListHMACKeys() as it is the only caller. call := it.raw.List(it.projectID) - setClientHeader(call.Header()) if pageToken != "" { call = call.PageToken(pageToken) } diff --git a/vendor/cloud.google.com/go/storage/http_client.go b/vendor/cloud.google.com/go/storage/http_client.go index f75d93897d9..0e213a6632a 100644 --- a/vendor/cloud.google.com/go/storage/http_client.go +++ b/vendor/cloud.google.com/go/storage/http_client.go @@ -107,12 +107,12 @@ func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageCl // Append the emulator host as default endpoint for the user o = append([]option.ClientOption{option.WithoutAuthentication()}, o...) 
- o = append(o, internaloption.WithDefaultEndpoint(endpoint)) + o = append(o, internaloption.WithDefaultEndpointTemplate(endpoint)) o = append(o, internaloption.WithDefaultMTLSEndpoint(endpoint)) } s.clientOption = o - // htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpoint, and WithDefaultMTLSEndpoint. + // htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpointTemplate, and WithDefaultMTLSEndpoint. hc, ep, err := htransport.NewClient(ctx, s.clientOption...) if err != nil { return nil, fmt.Errorf("dialing: %w", err) @@ -176,7 +176,6 @@ func (c *httpStorageClient) CreateBucket(ctx context.Context, project, bucket st bkt.Location = "US" } req := c.raw.Buckets.Insert(project, bkt) - setClientHeader(req.Header()) if attrs != nil && attrs.PredefinedACL != "" { req.PredefinedAcl(attrs.PredefinedACL) } @@ -207,7 +206,6 @@ func (c *httpStorageClient) ListBuckets(ctx context.Context, project string, opt fetch := func(pageSize int, pageToken string) (token string, err error) { req := c.raw.Buckets.List(it.projectID) - setClientHeader(req.Header()) req.Projection("full") req.Prefix(it.Prefix) req.PageToken(pageToken) @@ -245,7 +243,6 @@ func (c *httpStorageClient) ListBuckets(ctx context.Context, project string, opt func (c *httpStorageClient) DeleteBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error { s := callSettings(c.settings, opts...) req := c.raw.Buckets.Delete(bucket) - setClientHeader(req.Header()) if err := applyBucketConds("httpStorageClient.DeleteBucket", conds, req); err != nil { return err } @@ -259,7 +256,6 @@ func (c *httpStorageClient) DeleteBucket(ctx context.Context, bucket string, con func (c *httpStorageClient) GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) { s := callSettings(c.settings, opts...) req := c.raw.Buckets.Get(bucket).Projection("full") - setClientHeader(req.Header()) err := applyBucketConds("httpStorageClient.GetBucket", conds, req) if err != nil { return nil, err @@ -287,7 +283,6 @@ func (c *httpStorageClient) UpdateBucket(ctx context.Context, bucket string, uat s := callSettings(c.settings, opts...) rb := uattrs.toRawBucket() req := c.raw.Buckets.Patch(bucket, rb).Projection("full") - setClientHeader(req.Header()) err := applyBucketConds("httpStorageClient.UpdateBucket", conds, req) if err != nil { return nil, err @@ -337,7 +332,9 @@ func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Q } fetch := func(pageSize int, pageToken string) (string, error) { req := c.raw.Objects.List(bucket) - setClientHeader(req.Header()) + if it.query.SoftDeleted { + req.SoftDeleted(it.query.SoftDeleted) + } projection := it.query.Projection if projection == ProjectionDefault { projection = ProjectionFull @@ -409,18 +406,22 @@ func (c *httpStorageClient) DeleteObject(ctx context.Context, bucket, object str return err } -func (c *httpStorageClient) GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) { +func (c *httpStorageClient) GetObject(ctx context.Context, params *getObjectParams, opts ...storageOption) (*ObjectAttrs, error) { s := callSettings(c.settings, opts...) 
- req := c.raw.Objects.Get(bucket, object).Projection("full").Context(ctx) - if err := applyConds("Attrs", gen, conds, req); err != nil { + req := c.raw.Objects.Get(params.bucket, params.object).Projection("full").Context(ctx) + if err := applyConds("Attrs", params.gen, params.conds, req); err != nil { return nil, err } if s.userProject != "" { req.UserProject(s.userProject) } - if err := setEncryptionHeaders(req.Header(), encryptionKey, false); err != nil { + if err := setEncryptionHeaders(req.Header(), params.encryptionKey, false); err != nil { return nil, err } + if params.softDeleted { + req.SoftDeleted(params.softDeleted) + } + var obj *raw.Object var err error err = run(ctx, func(ctx context.Context) error { @@ -547,6 +548,33 @@ func (c *httpStorageClient) UpdateObject(ctx context.Context, params *updateObje return newObject(obj), nil } +func (c *httpStorageClient) RestoreObject(ctx context.Context, params *restoreObjectParams, opts ...storageOption) (*ObjectAttrs, error) { + s := callSettings(c.settings, opts...) + req := c.raw.Objects.Restore(params.bucket, params.object, params.gen).Context(ctx) + // Do not set the generation here since it's not an optional condition; it gets set above. + if err := applyConds("RestoreObject", defaultGen, params.conds, req); err != nil { + return nil, err + } + if s.userProject != "" { + req.UserProject(s.userProject) + } + if params.copySourceACL { + req.CopySourceAcl(params.copySourceACL) + } + if err := setEncryptionHeaders(req.Header(), params.encryptionKey, false); err != nil { + return nil, err + } + + var obj *raw.Object + var err error + err = run(ctx, func(ctx context.Context) error { obj, err = req.Context(ctx).Do(); return err }, s.retry, s.idempotent) + var e *googleapi.Error + if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { + return nil, ErrObjectNotExist + } + return newObject(obj), err +} + // Default Object ACL methods. func (c *httpStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error { @@ -632,7 +660,7 @@ func (c *httpStorageClient) UpdateBucketACL(ctx context.Context, bucket string, }, s.retry, s.idempotent) } -// configureACLCall sets the context, user project and headers on the apiary library call. +// configureACLCall sets the context and user project on the apiary library call. // This will panic if the call does not have the correct methods. func configureACLCall(ctx context.Context, userProject string, call interface{ Header() http.Header }) { vc := reflect.ValueOf(call) @@ -640,7 +668,6 @@ func configureACLCall(ctx context.Context, userProject string, call interface{ H if userProject != "" { vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(userProject)}) } - setClientHeader(call.Header()) } // Object ACL methods. 
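
Editor's note on the soft-delete plumbing in the hunks above: the RestoreObject implementations (gRPC and JSON), restoreObjectParams.copySourceACL, and the SoftDeleted flags on Get/List back the public soft-delete workflow. The sketch below shows how a caller might list and restore soft-deleted objects; ObjectHandle.Restore, RestoreOptions, and Query.SoftDeleted are assumed to be the public counterparts of these fields (per the v1.41.0 CHANGELOG entry "Support for soft delete policies and restore"), not definitions introduced verbatim by this patch.

```go
package storageexample

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

// restoreDeleted lists soft-deleted objects in a bucket and restores one
// generation of the named object, copying the source ACL.
func restoreDeleted(ctx context.Context, client *storage.Client, bucket, object string, gen int64) error {
	bkt := client.Bucket(bucket)

	// Query.SoftDeleted is plumbed through both the gRPC and JSON ListObjects
	// paths in this change.
	it := bkt.Objects(ctx, &storage.Query{SoftDeleted: true})
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			return err
		}
		log.Printf("soft-deleted: %s (generation %d)", attrs.Name, attrs.Generation)
	}

	// Restore maps onto the restoreObjectParams added above; CopySourceACL
	// corresponds to the copySourceACL field / CopySourceAcl request flag.
	_, err := bkt.Object(object).Generation(gen).Restore(ctx, &storage.RestoreOptions{CopySourceACL: true})
	return err
}
```

The generation argument identifies which soft-deleted version to restore; it is supplied by the caller here because restoreObjectParams.gen is applied directly to the raw request rather than as an optional precondition.
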
@@ -726,7 +753,6 @@ func (c *httpStorageClient) ComposeObject(ctx context.Context, req *composeObjec return nil, err } var obj *raw.Object - setClientHeader(call.Header()) var err error retryCall := func(ctx context.Context) error { obj, err = call.Context(ctx).Do(); return err } @@ -775,7 +801,6 @@ func (c *httpStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec var res *raw.RewriteResponse var err error - setClientHeader(call.Header()) retryCall := func(ctx context.Context) error { res, err = call.Context(ctx).Do(); return err } @@ -830,17 +855,18 @@ func (c *httpStorageClient) newRangeReaderXML(ctx context.Context, params *newRa return nil, err } - // Set custom headers passed in via the context. This is only required for XML; - // for gRPC & JSON this is handled in the GAPIC and Apiary layers respectively. - ctxHeaders := callctx.HeadersFromContext(ctx) - for k, vals := range ctxHeaders { - for _, v := range vals { - req.Header.Add(k, v) - } - } - reopen := readerReopen(ctx, req.Header, params, s, - func(ctx context.Context) (*http.Response, error) { return c.hc.Do(req.WithContext(ctx)) }, + func(ctx context.Context) (*http.Response, error) { + // Set custom headers passed in via the context. This is only required for XML; + // for gRPC & JSON this is handled in the GAPIC and Apiary layers respectively. + ctxHeaders := callctx.HeadersFromContext(ctx) + for k, vals := range ctxHeaders { + for _, v := range vals { + req.Header.Set(k, v) + } + } + return c.hc.Do(req.WithContext(ctx)) + }, func() error { return setConditionsHeaders(req.Header, params.conds) }, func() { req.URL.RawQuery = fmt.Sprintf("generation=%d", params.gen) }) @@ -854,7 +880,6 @@ func (c *httpStorageClient) newRangeReaderXML(ctx context.Context, params *newRa func (c *httpStorageClient) newRangeReaderJSON(ctx context.Context, params *newRangeReaderParams, s *settings) (r *Reader, err error) { call := c.raw.Objects.Get(params.bucket, params.object) - setClientHeader(call.Header()) call.Projection("full") if s.userProject != "" { @@ -970,7 +995,6 @@ func (c *httpStorageClient) OpenWriter(params *openWriterParams, opts ...storage func (c *httpStorageClient) GetIamPolicy(ctx context.Context, resource string, version int32, opts ...storageOption) (*iampb.Policy, error) { s := callSettings(c.settings, opts...) call := c.raw.Buckets.GetIamPolicy(resource).OptionsRequestedPolicyVersion(int64(version)) - setClientHeader(call.Header()) if s.userProject != "" { call.UserProject(s.userProject) } @@ -991,7 +1015,6 @@ func (c *httpStorageClient) SetIamPolicy(ctx context.Context, resource string, p rp := iamToStoragePolicy(policy) call := c.raw.Buckets.SetIamPolicy(resource, rp) - setClientHeader(call.Header()) if s.userProject != "" { call.UserProject(s.userProject) } @@ -1005,7 +1028,6 @@ func (c *httpStorageClient) SetIamPolicy(ctx context.Context, resource string, p func (c *httpStorageClient) TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error) { s := callSettings(c.settings, opts...) 
call := c.raw.Buckets.TestIamPermissions(resource, permissions) - setClientHeader(call.Header()) if s.userProject != "" { call.UserProject(s.userProject) } @@ -1054,7 +1076,6 @@ func (c *httpStorageClient) ListHMACKeys(ctx context.Context, project, serviceAc } fetch := func(pageSize int, pageToken string) (token string, err error) { call := c.raw.Projects.HmacKeys.List(project) - setClientHeader(call.Header()) if pageToken != "" { call = call.PageToken(pageToken) } diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go index 47300d7a10f..82ec5db902b 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go @@ -962,7 +962,9 @@ func (c *gRPCClient) Connection() *grpc.ClientConn { func (c *gRPCClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go index 9637bc0a5b6..aeb7512f4ab 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/storage/v2/storage.proto package storagepb @@ -8968,7 +8968,7 @@ func file_google_storage_v2_storage_proto_rawDescGZIP() []byte { var file_google_storage_v2_storage_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_google_storage_v2_storage_proto_msgTypes = make([]protoimpl.MessageInfo, 79) -var file_google_storage_v2_storage_proto_goTypes = []interface{}{ +var file_google_storage_v2_storage_proto_goTypes = []any{ (ServiceConstants_Values)(0), // 0: google.storage.v2.ServiceConstants.Values (*DeleteBucketRequest)(nil), // 1: google.storage.v2.DeleteBucketRequest (*GetBucketRequest)(nil), // 2: google.storage.v2.GetBucketRequest @@ -9241,7 +9241,7 @@ func file_google_storage_v2_storage_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_storage_v2_storage_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*DeleteBucketRequest); i { case 0: return &v.state @@ -9253,7 +9253,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*GetBucketRequest); i { case 0: return &v.state @@ -9265,7 +9265,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*CreateBucketRequest); i { case 0: return &v.state @@ -9277,7 +9277,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*ListBucketsRequest); i { case 0: return &v.state @@ -9289,7 +9289,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*ListBucketsResponse); i { case 0: return &v.state @@ -9301,7 +9301,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*LockBucketRetentionPolicyRequest); i { case 0: return &v.state @@ -9313,7 +9313,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*UpdateBucketRequest); i { case 0: return &v.state @@ -9325,7 +9325,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*DeleteNotificationConfigRequest); i { case 0: return 
&v.state @@ -9337,7 +9337,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*GetNotificationConfigRequest); i { case 0: return &v.state @@ -9349,7 +9349,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*CreateNotificationConfigRequest); i { case 0: return &v.state @@ -9361,7 +9361,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*ListNotificationConfigsRequest); i { case 0: return &v.state @@ -9373,7 +9373,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*ListNotificationConfigsResponse); i { case 0: return &v.state @@ -9385,7 +9385,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*ComposeObjectRequest); i { case 0: return &v.state @@ -9397,7 +9397,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*DeleteObjectRequest); i { case 0: return &v.state @@ -9409,7 +9409,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[14].Exporter = func(v any, i int) any { switch v := v.(*RestoreObjectRequest); i { case 0: return &v.state @@ -9421,7 +9421,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v := v.(*CancelResumableWriteRequest); i { case 0: return &v.state @@ -9433,7 +9433,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[16].Exporter = func(v any, i int) any { switch v := v.(*CancelResumableWriteResponse); i { case 0: return &v.state @@ -9445,7 +9445,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[17].Exporter = func(v any, i int) any { switch v := v.(*ReadObjectRequest); i { case 0: return &v.state @@ -9457,7 +9457,7 @@ func 
file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[18].Exporter = func(v any, i int) any { switch v := v.(*GetObjectRequest); i { case 0: return &v.state @@ -9469,7 +9469,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[19].Exporter = func(v any, i int) any { switch v := v.(*ReadObjectResponse); i { case 0: return &v.state @@ -9481,7 +9481,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[20].Exporter = func(v any, i int) any { switch v := v.(*WriteObjectSpec); i { case 0: return &v.state @@ -9493,7 +9493,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[21].Exporter = func(v any, i int) any { switch v := v.(*WriteObjectRequest); i { case 0: return &v.state @@ -9505,7 +9505,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[22].Exporter = func(v any, i int) any { switch v := v.(*WriteObjectResponse); i { case 0: return &v.state @@ -9517,7 +9517,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[23].Exporter = func(v any, i int) any { switch v := v.(*BidiWriteObjectRequest); i { case 0: return &v.state @@ -9529,7 +9529,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[24].Exporter = func(v any, i int) any { switch v := v.(*BidiWriteObjectResponse); i { case 0: return &v.state @@ -9541,7 +9541,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[25].Exporter = func(v any, i int) any { switch v := v.(*ListObjectsRequest); i { case 0: return &v.state @@ -9553,7 +9553,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[26].Exporter = func(v any, i int) any { switch v := v.(*QueryWriteStatusRequest); i { case 0: return &v.state @@ -9565,7 +9565,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[27].Exporter = func(v any, i int) any { switch v := v.(*QueryWriteStatusResponse); i { case 0: return &v.state @@ -9577,7 +9577,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - 
file_google_storage_v2_storage_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[28].Exporter = func(v any, i int) any { switch v := v.(*RewriteObjectRequest); i { case 0: return &v.state @@ -9589,7 +9589,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[29].Exporter = func(v any, i int) any { switch v := v.(*RewriteResponse); i { case 0: return &v.state @@ -9601,7 +9601,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[30].Exporter = func(v any, i int) any { switch v := v.(*StartResumableWriteRequest); i { case 0: return &v.state @@ -9613,7 +9613,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[31].Exporter = func(v any, i int) any { switch v := v.(*StartResumableWriteResponse); i { case 0: return &v.state @@ -9625,7 +9625,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[32].Exporter = func(v any, i int) any { switch v := v.(*UpdateObjectRequest); i { case 0: return &v.state @@ -9637,7 +9637,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[33].Exporter = func(v any, i int) any { switch v := v.(*GetServiceAccountRequest); i { case 0: return &v.state @@ -9649,7 +9649,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[34].Exporter = func(v any, i int) any { switch v := v.(*CreateHmacKeyRequest); i { case 0: return &v.state @@ -9661,7 +9661,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[35].Exporter = func(v any, i int) any { switch v := v.(*CreateHmacKeyResponse); i { case 0: return &v.state @@ -9673,7 +9673,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[36].Exporter = func(v any, i int) any { switch v := v.(*DeleteHmacKeyRequest); i { case 0: return &v.state @@ -9685,7 +9685,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[37].Exporter = func(v any, i int) any { switch v := v.(*GetHmacKeyRequest); i { case 0: return &v.state @@ -9697,7 +9697,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[38].Exporter = func(v 
interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[38].Exporter = func(v any, i int) any { switch v := v.(*ListHmacKeysRequest); i { case 0: return &v.state @@ -9709,7 +9709,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[39].Exporter = func(v any, i int) any { switch v := v.(*ListHmacKeysResponse); i { case 0: return &v.state @@ -9721,7 +9721,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[40].Exporter = func(v any, i int) any { switch v := v.(*UpdateHmacKeyRequest); i { case 0: return &v.state @@ -9733,7 +9733,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[41].Exporter = func(v any, i int) any { switch v := v.(*CommonObjectRequestParams); i { case 0: return &v.state @@ -9745,7 +9745,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[42].Exporter = func(v any, i int) any { switch v := v.(*ServiceConstants); i { case 0: return &v.state @@ -9757,7 +9757,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[43].Exporter = func(v any, i int) any { switch v := v.(*Bucket); i { case 0: return &v.state @@ -9769,7 +9769,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[44].Exporter = func(v any, i int) any { switch v := v.(*BucketAccessControl); i { case 0: return &v.state @@ -9781,7 +9781,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[45].Exporter = func(v any, i int) any { switch v := v.(*ChecksummedData); i { case 0: return &v.state @@ -9793,7 +9793,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[46].Exporter = func(v any, i int) any { switch v := v.(*ObjectChecksums); i { case 0: return &v.state @@ -9805,7 +9805,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[47].Exporter = func(v any, i int) any { switch v := v.(*HmacKeyMetadata); i { case 0: return &v.state @@ -9817,7 +9817,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[48].Exporter = func(v any, i 
int) any { switch v := v.(*NotificationConfig); i { case 0: return &v.state @@ -9829,7 +9829,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[49].Exporter = func(v any, i int) any { switch v := v.(*CustomerEncryption); i { case 0: return &v.state @@ -9841,7 +9841,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[50].Exporter = func(v any, i int) any { switch v := v.(*Object); i { case 0: return &v.state @@ -9853,7 +9853,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[51].Exporter = func(v any, i int) any { switch v := v.(*ObjectAccessControl); i { case 0: return &v.state @@ -9865,7 +9865,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[52].Exporter = func(v any, i int) any { switch v := v.(*ListObjectsResponse); i { case 0: return &v.state @@ -9877,7 +9877,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[53].Exporter = func(v any, i int) any { switch v := v.(*ProjectTeam); i { case 0: return &v.state @@ -9889,7 +9889,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[54].Exporter = func(v any, i int) any { switch v := v.(*ServiceAccount); i { case 0: return &v.state @@ -9901,7 +9901,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[55].Exporter = func(v any, i int) any { switch v := v.(*Owner); i { case 0: return &v.state @@ -9913,7 +9913,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[56].Exporter = func(v any, i int) any { switch v := v.(*ContentRange); i { case 0: return &v.state @@ -9925,7 +9925,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[57].Exporter = func(v any, i int) any { switch v := v.(*ComposeObjectRequest_SourceObject); i { case 0: return &v.state @@ -9937,7 +9937,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[58].Exporter = func(v any, i int) any { switch v := v.(*ComposeObjectRequest_SourceObject_ObjectPreconditions); i { case 0: return &v.state @@ -9949,7 +9949,7 
@@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[59].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Billing); i { case 0: return &v.state @@ -9961,7 +9961,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[60].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Cors); i { case 0: return &v.state @@ -9973,7 +9973,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[61].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Encryption); i { case 0: return &v.state @@ -9985,7 +9985,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[62].Exporter = func(v any, i int) any { switch v := v.(*Bucket_IamConfig); i { case 0: return &v.state @@ -9997,7 +9997,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[63].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Lifecycle); i { case 0: return &v.state @@ -10009,7 +10009,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[64].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Logging); i { case 0: return &v.state @@ -10021,7 +10021,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[65].Exporter = func(v any, i int) any { switch v := v.(*Bucket_RetentionPolicy); i { case 0: return &v.state @@ -10033,7 +10033,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[66].Exporter = func(v any, i int) any { switch v := v.(*Bucket_SoftDeletePolicy); i { case 0: return &v.state @@ -10045,7 +10045,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[67].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Versioning); i { case 0: return &v.state @@ -10057,7 +10057,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[68].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Website); i { case 0: return &v.state @@ -10069,7 +10069,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - 
file_google_storage_v2_storage_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[69].Exporter = func(v any, i int) any { switch v := v.(*Bucket_CustomPlacementConfig); i { case 0: return &v.state @@ -10081,7 +10081,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[70].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Autoclass); i { case 0: return &v.state @@ -10093,7 +10093,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[71].Exporter = func(v any, i int) any { switch v := v.(*Bucket_HierarchicalNamespace); i { case 0: return &v.state @@ -10105,7 +10105,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[73].Exporter = func(v any, i int) any { switch v := v.(*Bucket_IamConfig_UniformBucketLevelAccess); i { case 0: return &v.state @@ -10117,7 +10117,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[74].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Lifecycle_Rule); i { case 0: return &v.state @@ -10129,7 +10129,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[75].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Lifecycle_Rule_Action); i { case 0: return &v.state @@ -10141,7 +10141,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[76].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Lifecycle_Rule_Condition); i { case 0: return &v.state @@ -10154,48 +10154,48 @@ func file_google_storage_v2_storage_proto_init() { } } } - file_google_storage_v2_storage_proto_msgTypes[0].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[1].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[3].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[6].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[12].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[13].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[14].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[17].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[18].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[20].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[21].OneofWrappers = []interface{}{ + file_google_storage_v2_storage_proto_msgTypes[0].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[1].OneofWrappers = 
[]any{} + file_google_storage_v2_storage_proto_msgTypes[3].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[6].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[12].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[13].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[14].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[17].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[18].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[20].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[21].OneofWrappers = []any{ (*WriteObjectRequest_UploadId)(nil), (*WriteObjectRequest_WriteObjectSpec)(nil), (*WriteObjectRequest_ChecksummedData)(nil), } - file_google_storage_v2_storage_proto_msgTypes[22].OneofWrappers = []interface{}{ + file_google_storage_v2_storage_proto_msgTypes[22].OneofWrappers = []any{ (*WriteObjectResponse_PersistedSize)(nil), (*WriteObjectResponse_Resource)(nil), } - file_google_storage_v2_storage_proto_msgTypes[23].OneofWrappers = []interface{}{ + file_google_storage_v2_storage_proto_msgTypes[23].OneofWrappers = []any{ (*BidiWriteObjectRequest_UploadId)(nil), (*BidiWriteObjectRequest_WriteObjectSpec)(nil), (*BidiWriteObjectRequest_ChecksummedData)(nil), } - file_google_storage_v2_storage_proto_msgTypes[24].OneofWrappers = []interface{}{ + file_google_storage_v2_storage_proto_msgTypes[24].OneofWrappers = []any{ (*BidiWriteObjectResponse_PersistedSize)(nil), (*BidiWriteObjectResponse_Resource)(nil), } - file_google_storage_v2_storage_proto_msgTypes[25].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[27].OneofWrappers = []interface{}{ + file_google_storage_v2_storage_proto_msgTypes[25].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[27].OneofWrappers = []any{ (*QueryWriteStatusResponse_PersistedSize)(nil), (*QueryWriteStatusResponse_Resource)(nil), } - file_google_storage_v2_storage_proto_msgTypes[28].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[32].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[45].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[46].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[50].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[58].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[66].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[70].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[76].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[28].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[32].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[45].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[46].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[50].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[58].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[66].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[70].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[76].OneofWrappers = []any{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git 
a/vendor/cloud.google.com/go/storage/internal/version.go b/vendor/cloud.google.com/go/storage/internal/version.go index 1c52a3504b2..e5b2de09172 100644 --- a/vendor/cloud.google.com/go/storage/internal/version.go +++ b/vendor/cloud.google.com/go/storage/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.40.0" +const Version = "1.43.0" diff --git a/vendor/cloud.google.com/go/storage/invoke.go b/vendor/cloud.google.com/go/storage/invoke.go index 1b52eb5d2c6..de57b4bbbbc 100644 --- a/vendor/cloud.google.com/go/storage/invoke.go +++ b/vendor/cloud.google.com/go/storage/invoke.go @@ -70,8 +70,8 @@ func run(ctx context.Context, call func(ctx context.Context) error, retry *retry return internal.Retry(ctx, bo, func() (stop bool, err error) { ctxWithHeaders := setInvocationHeaders(ctx, invocationID, attempts) err = call(ctxWithHeaders) - if retry.maxAttempts != nil && attempts >= *retry.maxAttempts { - return true, err + if err != nil && retry.maxAttempts != nil && attempts >= *retry.maxAttempts { + return true, fmt.Errorf("storage: retry failed after %v attempts; last error: %w", *retry.maxAttempts, err) } attempts++ return !errorFunc(err), err @@ -84,7 +84,21 @@ func setInvocationHeaders(ctx context.Context, invocationID string, attempts int invocationHeader := fmt.Sprintf("gccl-invocation-id/%v gccl-attempt-count/%v", invocationID, attempts) xGoogHeader := strings.Join([]string{invocationHeader, xGoogDefaultHeader}, " ") - ctx = callctx.SetHeaders(ctx, xGoogHeaderKey, xGoogHeader) + // TODO: remove this once the respective transport packages merge xGoogHeader. + // Also remove gl-go at that time, as it will be repeated. + hdrs := callctx.HeadersFromContext(ctx) + for _, v := range hdrs[xGoogHeaderKey] { + xGoogHeader = strings.Join([]string{xGoogHeader, v}, " ") + } + + if hdrs[xGoogHeaderKey] != nil { + // Replace the key instead of adding it, if there was anything to merge with. + hdrs[xGoogHeaderKey] = []string{xGoogHeader} + } else { + // TODO: keep this line when removing the above code. + ctx = callctx.SetHeaders(ctx, xGoogHeaderKey, xGoogHeader) + } + ctx = callctx.SetHeaders(ctx, idempotencyHeaderKey, invocationID) return ctx } @@ -105,22 +119,20 @@ func ShouldRetry(err error) bool { if errors.Is(err, io.ErrUnexpectedEOF) { return true } + if errors.Is(err, net.ErrClosed) { + return true + } switch e := err.(type) { - case *net.OpError: - if strings.Contains(e.Error(), "use of closed network connection") { - // TODO: check against net.ErrClosed (go 1.16+) instead of string - return true - } case *googleapi.Error: // Retry on 408, 429, and 5xx, according to // https://cloud.google.com/storage/docs/exponential-backoff. return e.Code == 408 || e.Code == 429 || (e.Code >= 500 && e.Code < 600) - case *url.Error: + case *net.OpError, *url.Error: // Retry socket-level errors ECONNREFUSED and ECONNRESET (from syscall). // Unfortunately the error type is unexported, so we resort to string // matching. 
- retriable := []string{"connection refused", "connection reset"} + retriable := []string{"connection refused", "connection reset", "broken pipe"} for _, s := range retriable { if strings.Contains(e.Error(), s) { return true diff --git a/vendor/cloud.google.com/go/storage/notifications.go b/vendor/cloud.google.com/go/storage/notifications.go index 56f3e3daa5c..1d6cfdf5984 100644 --- a/vendor/cloud.google.com/go/storage/notifications.go +++ b/vendor/cloud.google.com/go/storage/notifications.go @@ -116,7 +116,7 @@ func toProtoNotification(n *Notification) *storagepb.NotificationConfig { } } -var topicRE = regexp.MustCompile("^//pubsub.googleapis.com/projects/([^/]+)/topics/([^/]+)") +var topicRE = regexp.MustCompile(`^//pubsub\.googleapis\.com/projects/([^/]+)/topics/([^/]+)`) // parseNotificationTopic extracts the project and topic IDs from from the full // resource name returned by the service. If the name is malformed, it returns diff --git a/vendor/cloud.google.com/go/storage/option.go b/vendor/cloud.google.com/go/storage/option.go index e72ceb78f06..debdb0f52d5 100644 --- a/vendor/cloud.google.com/go/storage/option.go +++ b/vendor/cloud.google.com/go/storage/option.go @@ -44,10 +44,14 @@ type storageClientOption interface { ApplyStorageOpt(*storageConfig) } -// WithJSONReads is an option that may be passed to a Storage Client on creation. -// It sets the client to use the JSON API for object reads. Currently, the -// default API used for reads is XML. -// Setting this option is required to use the GenerationNotMatch condition. +// WithJSONReads is an option that may be passed to [NewClient]. +// It sets the client to use the Cloud Storage JSON API for object +// reads. Currently, the default API used for reads is XML, but JSON will +// become the default in a future release. +// +// Setting this option is required to use the GenerationNotMatch condition. We +// also recommend using JSON reads to ensure consistency with other client +// operations (all of which use JSON by default). // // Note that when this option is set, reads will return a zero date for // [ReaderObjectAttrs].LastModified and may return a different value for @@ -56,10 +60,11 @@ func WithJSONReads() option.ClientOption { return &withReadAPI{useJSON: true} } -// WithXMLReads is an option that may be passed to a Storage Client on creation. -// It sets the client to use the XML API for object reads. +// WithXMLReads is an option that may be passed to [NewClient]. +// It sets the client to use the Cloud Storage XML API for object reads. // -// This is the current default. +// This is the current default, but the default will switch to JSON in a future +// release. func WithXMLReads() option.ClientOption { return &withReadAPI{useJSON: false} } diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go index 0b228a6a76c..6da2432f004 100644 --- a/vendor/cloud.google.com/go/storage/reader.go +++ b/vendor/cloud.google.com/go/storage/reader.go @@ -72,6 +72,12 @@ type ReaderObjectAttrs struct { // ErrObjectNotExist will be returned if the object is not found. // // The caller must call Close on the returned Reader when done reading. +// +// By default, reads are made using the Cloud Storage XML API. We recommend +// using the JSON API instead, which can be done by setting [WithJSONReads] +// when calling [NewClient]. This ensures consistency with other client +// operations, which all use JSON. JSON will become the default in a future +// release. 
func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) { return o.NewRangeReader(ctx, 0, -1) } @@ -86,6 +92,12 @@ func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) { // decompressive transcoding per https://cloud.google.com/storage/docs/transcoding // that file will be served back whole, regardless of the requested range as // Google Cloud Storage dictates. +// +// By default, reads are made using the Cloud Storage XML API. We recommend +// using the JSON API instead, which can be done by setting [WithJSONReads] +// when calling [NewClient]. This ensures consistency with other client +// operations, which all use JSON. JSON will become the default in a future +// release. func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (r *Reader, err error) { // This span covers the life of the reader. It is closed via the context // in Reader.Close. diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go index c01085f35dd..b6316fa668f 100644 --- a/vendor/cloud.google.com/go/storage/storage.go +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -117,10 +117,6 @@ type Client struct { // tc is the transport-agnostic client implemented with either gRPC or HTTP. tc storageClient - // useGRPC flags whether the client uses gRPC. This is needed while the - // integration piece is only partially complete. - // TODO: remove before merging to main. - useGRPC bool } // NewClient creates a new Google Cloud Storage client using the HTTP transport. @@ -180,12 +176,12 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error opts = append([]option.ClientOption{ option.WithoutAuthentication(), internaloption.SkipDialSettingsValidation(), - internaloption.WithDefaultEndpoint(endpoint), + internaloption.WithDefaultEndpointTemplate(endpoint), internaloption.WithDefaultMTLSEndpoint(endpoint), }, opts...) } - // htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpoint, and WithDefaultMTLSEndpoint. + // htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpointTemplate, and WithDefaultMTLSEndpoint. hc, ep, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, fmt.Errorf("dialing: %w", err) @@ -232,13 +228,12 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error // You may configure the client by passing in options from the [google.golang.org/api/option] // package. func NewGRPCClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { - opts = append(defaultGRPCOptions(), opts...) tc, err := newGRPCStorageClient(ctx, withClientOptions(opts...)) if err != nil { return nil, err } - return &Client{tc: tc, useGRPC: true}, nil + return &Client{tc: tc}, nil } // Close closes the Client. @@ -898,6 +893,7 @@ type ObjectHandle struct { readCompressed bool // Accept-Encoding: gzip retry *retryConfig overrideRetention *bool + softDeleted bool } // ACL provides access to the object's access control list. @@ -952,7 +948,7 @@ func (o *ObjectHandle) Attrs(ctx context.Context) (attrs *ObjectAttrs, err error return nil, err } opts := makeStorageOpts(true, o.retry, o.userProject) - return o.c.tc.GetObject(ctx, o.bucket, o.object, o.gen, o.encryptionKey, o.conds, opts...) + return o.c.tc.GetObject(ctx, &getObjectParams{o.bucket, o.object, o.gen, o.encryptionKey, o.conds, o.softDeleted}, opts...) } // Update updates an object with the provided attributes. 
See @@ -975,7 +971,8 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) ( gen: o.gen, encryptionKey: o.encryptionKey, conds: o.conds, - overrideRetention: o.overrideRetention}, opts...) + overrideRetention: o.overrideRetention, + }, opts...) } // BucketName returns the name of the bucket. @@ -1057,6 +1054,50 @@ func (o *ObjectHandle) OverrideUnlockedRetention(override bool) *ObjectHandle { return &o2 } +// SoftDeleted returns an object handle that can be used to get an object that +// has been soft deleted. To get a soft deleted object, the generation must be +// set on the object using ObjectHandle.Generation. +// Note that an error will be returned if a live object is queried using this. +func (o *ObjectHandle) SoftDeleted() *ObjectHandle { + o2 := *o + o2.softDeleted = true + return &o2 +} + +// RestoreOptions allows you to set options when restoring an object. +type RestoreOptions struct { + /// CopySourceACL indicates whether the restored object should copy the + // access controls of the source object. Only valid for buckets with + // fine-grained access. If uniform bucket-level access is enabled, setting + // CopySourceACL will cause an error. + CopySourceACL bool +} + +// Restore will restore a soft-deleted object to a live object. +// Note that you must specify a generation to use this method. +func (o *ObjectHandle) Restore(ctx context.Context, opts *RestoreOptions) (*ObjectAttrs, error) { + if err := o.validate(); err != nil { + return nil, err + } + + // Since the generation is required by restore calls, we set the default to + // 0 instead of a negative value, which returns a more descriptive error. + gen := o.gen + if o.gen == defaultGen { + gen = 0 + } + + // Restore is always idempotent because Generation is a required param. + sOpts := makeStorageOpts(true, o.retry, o.userProject) + return o.c.tc.RestoreObject(ctx, &restoreObjectParams{ + bucket: o.bucket, + object: o.object, + gen: gen, + conds: o.conds, + copySourceACL: opts.CopySourceACL, + }, sOpts...) +} + // NewWriter returns a storage Writer that writes to the GCS object // associated with this ObjectHandle. // @@ -1390,6 +1431,21 @@ type ObjectAttrs struct { // Retention contains the retention configuration for this object. // ObjectRetention cannot be configured or reported through the gRPC API. Retention *ObjectRetention + + // SoftDeleteTime is the time when the object became soft-deleted. + // Soft-deleted objects are only accessible on an object handle returned by + // ObjectHandle.SoftDeleted; if ObjectHandle.SoftDeleted has not been set, + // ObjectHandle.Attrs will return ErrObjectNotExist if the object is soft-deleted. + // This field is read-only. + SoftDeleteTime time.Time + + // HardDeleteTime is the time when the object will be permanently deleted. + // Only set when an object becomes soft-deleted with a soft delete policy. + // Soft-deleted objects are only accessible on an object handle returned by + // ObjectHandle.SoftDeleted; if ObjectHandle.SoftDeleted has not been set, + // ObjectHandle.Attrs will return ErrObjectNotExist if the object is soft-deleted. + // This field is read-only. + HardDeleteTime time.Time } // ObjectRetention contains the retention configuration for this object. 
@@ -1494,6 +1550,8 @@ func newObject(o *raw.Object) *ObjectAttrs { CustomTime: convertTime(o.CustomTime), ComponentCount: o.ComponentCount, Retention: toObjectRetention(o.Retention), + SoftDeleteTime: convertTime(o.SoftDeleteTime), + HardDeleteTime: convertTime(o.HardDeleteTime), } } @@ -1529,6 +1587,8 @@ func newObjectFromProto(o *storagepb.Object) *ObjectAttrs { Updated: convertProtoTime(o.GetUpdateTime()), CustomTime: convertProtoTime(o.GetCustomTime()), ComponentCount: int64(o.ComponentCount), + SoftDeleteTime: convertProtoTime(o.GetSoftDeleteTime()), + HardDeleteTime: convertProtoTime(o.GetHardDeleteTime()), } } @@ -1637,6 +1697,11 @@ type Query struct { // prefixes returned by the query. Only applicable if Delimiter is set to /. // IncludeFoldersAsPrefixes is not yet implemented in the gRPC API. IncludeFoldersAsPrefixes bool + + // SoftDeleted indicates whether to list soft-deleted objects. + // If true, only objects that have been soft-deleted will be listed. + // By default, soft-deleted objects are not listed. + SoftDeleted bool } // attrToFieldMap maps the field names of ObjectAttrs to the underlying field @@ -1672,6 +1737,8 @@ var attrToFieldMap = map[string]string{ "CustomTime": "customTime", "ComponentCount": "componentCount", "Retention": "retention", + "HardDeleteTime": "hardDeleteTime", + "SoftDeleteTime": "softDeleteTime", } // attrToProtoFieldMap maps the field names of ObjectAttrs to the underlying field @@ -1704,6 +1771,8 @@ var attrToProtoFieldMap = map[string]string{ "CustomerKeySHA256": "customer_encryption", "CustomTime": "custom_time", "ComponentCount": "component_count", + "HardDeleteTime": "hard_delete_time", + "SoftDeleteTime": "soft_delete_time", // MediaLink was explicitly excluded from the proto as it is an HTTP-ism. // "MediaLink": "mediaLink", // TODO: add object retention - b/308194853 @@ -2284,7 +2353,6 @@ func toProtoChecksums(sendCRC32C bool, attrs *ObjectAttrs) *storagepb.ObjectChec func (c *Client) ServiceAccount(ctx context.Context, projectID string) (string, error) { o := makeStorageOpts(true, c.retry, "") return c.tc.GetServiceAccount(ctx, projectID, o...) - } // bucketResourceName formats the given project ID and bucketResourceName ID diff --git a/vendor/github.com/Azure/azure-kusto-go/kusto/internal/version/version.go b/vendor/github.com/Azure/azure-kusto-go/kusto/internal/version/version.go index ca4a11c694d..41b70be3eec 100644 --- a/vendor/github.com/Azure/azure-kusto-go/kusto/internal/version/version.go +++ b/vendor/github.com/Azure/azure-kusto-go/kusto/internal/version/version.go @@ -2,4 +2,4 @@ package version // Kusto is the version of this client package that is communicated to the server. 
-const Kusto = "0.15.1" +const Kusto = "0.16.1" diff --git a/vendor/github.com/Azure/azure-kusto-go/kusto/kcsb.go b/vendor/github.com/Azure/azure-kusto-go/kusto/kcsb.go index c01c3258336..e7b922348ae 100644 --- a/vendor/github.com/Azure/azure-kusto-go/kusto/kcsb.go +++ b/vendor/github.com/Azure/azure-kusto-go/kusto/kcsb.go @@ -2,6 +2,7 @@ package kusto import ( "fmt" + "os" "strconv" "strings" @@ -11,29 +12,30 @@ import ( ) type ConnectionStringBuilder struct { - DataSource string - AadUserID string - Password string - UserToken string - ApplicationClientId string - ApplicationKey string - AuthorityId string - ApplicationCertificate string - ApplicationCertificateThumbprint string - SendCertificateChain bool - ApplicationToken string - AzCli bool - MsiAuthentication bool - WorkloadAuthentication bool - FederationTokenFilePath string - ManagedServiceIdentity string - InteractiveLogin bool - RedirectURL string - DefaultAuth bool - ClientOptions *azcore.ClientOptions - ApplicationForTracing string - UserForTracing string - TokenCredential azcore.TokenCredential + DataSource string + AadUserID string + Password string + UserToken string + ApplicationClientId string + ApplicationKey string + AuthorityId string + ApplicationCertificatePath string + ApplicationCertificateBytes []byte + ApplicationCertificatePassword []byte + SendCertificateChain bool + ApplicationToken string + AzCli bool + MsiAuthentication bool + WorkloadAuthentication bool + FederationTokenFilePath string + ManagedServiceIdentity string + InteractiveLogin bool + RedirectURL string + DefaultAuth bool + ClientOptions *azcore.ClientOptions + ApplicationForTracing string + UserForTracing string + TokenCredential azcore.TokenCredential } const ( @@ -95,9 +97,7 @@ func assignValue(kcsb *ConnectionStringBuilder, rawKey string, value string) err case applicationKey: kcsb.ApplicationKey = value case applicationCertificate: - kcsb.ApplicationCertificate = value - case applicationCertificateThumbprint: - kcsb.ApplicationCertificateThumbprint = value + kcsb.ApplicationCertificatePath = value case sendCertificateChain: bval, _ := strconv.ParseBool(value) kcsb.SendCertificateChain = bval @@ -155,8 +155,9 @@ func (kcsb *ConnectionStringBuilder) resetConnectionString() { kcsb.ApplicationClientId = "" kcsb.ApplicationKey = "" kcsb.AuthorityId = "" - kcsb.ApplicationCertificate = "" - kcsb.ApplicationCertificateThumbprint = "" + kcsb.ApplicationCertificatePath = "" + kcsb.ApplicationCertificateBytes = nil + kcsb.ApplicationCertificatePassword = nil kcsb.SendCertificateChain = false kcsb.ApplicationToken = "" kcsb.AzCli = false @@ -204,17 +205,34 @@ func (kcsb *ConnectionStringBuilder) WithAadAppKey(appId string, appKey string, return kcsb } -// WithAppCertificate Creates a Kusto Connection string builder that will authenticate with AAD application using a certificate. -func (kcsb *ConnectionStringBuilder) WithAppCertificate(appId string, certificate string, thumprint string, sendCertChain bool, authorityID string) *ConnectionStringBuilder { +// WithAppCertificatePath Creates a Kusto Connection string builder that will authenticate with AAD application using a certificate. 
+func (kcsb *ConnectionStringBuilder) WithAppCertificatePath(appId string, certificatePath string, password []byte, sendCertChain bool, authorityID string) *ConnectionStringBuilder { requireNonEmpty(dataSource, kcsb.DataSource) - requireNonEmpty(applicationCertificate, certificate) + requireNonEmpty(applicationCertificate, certificatePath) requireNonEmpty(authorityId, authorityID) kcsb.resetConnectionString() kcsb.ApplicationClientId = appId kcsb.AuthorityId = authorityID - kcsb.ApplicationCertificate = certificate - kcsb.ApplicationCertificateThumbprint = thumprint + kcsb.ApplicationCertificatePath = certificatePath + kcsb.ApplicationCertificatePassword = password + kcsb.SendCertificateChain = sendCertChain + return kcsb +} + +// WithAppCertificateBytes Creates a Kusto Connection string builder that will authenticate with AAD application using a certificate. +func (kcsb *ConnectionStringBuilder) WithAppCertificateBytes(appId string, certificateBytes []byte, password []byte, sendCertChain bool, authorityID string) *ConnectionStringBuilder { + requireNonEmpty(dataSource, kcsb.DataSource) + requireNonEmpty(authorityId, authorityID) + if len(certificateBytes) == 0 { + panic("error: Certificate cannot be null") + } + kcsb.resetConnectionString() + kcsb.ApplicationClientId = appId + kcsb.AuthorityId = authorityID + + kcsb.ApplicationCertificateBytes = certificateBytes + kcsb.ApplicationCertificatePassword = password kcsb.SendCertificateChain = sendCertChain return kcsb } @@ -360,12 +378,23 @@ func (kcsb *ConnectionStringBuilder) newTokenProvider() (*TokenProvider, error) return cred, nil } - case !isEmpty(kcsb.ApplicationCertificate): + case !isEmpty(kcsb.ApplicationCertificatePath) || len(kcsb.ApplicationCertificateBytes) != 0: init = func(ci *CloudInfo, cliOpts *azcore.ClientOptions, appClientId string) (azcore.TokenCredential, error) { opts := &azidentity.ClientCertificateCredentialOptions{ClientOptions: *cliOpts} opts.SendCertificateChain = kcsb.SendCertificateChain - cert, thumprintKey, err := azidentity.ParseCertificates([]byte(kcsb.ApplicationCertificate), []byte(kcsb.ApplicationCertificateThumbprint)) + bytes := kcsb.ApplicationCertificateBytes + if !isEmpty(kcsb.ApplicationCertificatePath) { + // read the certificate from the file + fileBytes, err := os.ReadFile(kcsb.ApplicationCertificatePath) + if err != nil { + return nil, kustoErrors.E(kustoErrors.OpTokenProvider, kustoErrors.KOther, + fmt.Errorf("error: Couldn't read certificate file: %s", err)) + } + bytes = fileBytes + } + + cert, thumprintKey, err := azidentity.ParseCertificates(bytes, kcsb.ApplicationCertificatePassword) if err != nil { return nil, kustoErrors.E(kustoErrors.OpTokenProvider, kustoErrors.KOther, err) } diff --git a/vendor/github.com/Azure/azure-kusto-go/kusto/trustedendpoints/well_known_kusto_endpoints.json b/vendor/github.com/Azure/azure-kusto-go/kusto/trustedendpoints/well_known_kusto_endpoints.json index 11c4b637522..7a5aca6ad80 100644 --- a/vendor/github.com/Azure/azure-kusto-go/kusto/trustedendpoints/well_known_kusto_endpoints.json +++ b/vendor/github.com/Azure/azure-kusto-go/kusto/trustedendpoints/well_known_kusto_endpoints.json @@ -20,7 +20,9 @@ ".playfab.com", ".azureplayfab.com", ".kusto.data.microsoft.com", - ".kusto.fabric.microsoft.com" + ".kusto.fabric.microsoft.com", + ".api.securityplatform.microsoft.com", + ".securitycenter.windows.com" ], "AllowedKustoHostnames": [ "ade.applicationinsights.io", diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md 
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md index a6675492b1a..1a9cedbaf02 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -1,5 +1,26 @@ # Release History +## 1.13.0 (2024-07-16) + +### Features Added + +- Added runtime.NewRequestFromRequest(), allowing for a policy.Request to be created from an existing *http.Request. + +## 1.12.0 (2024-06-06) + +### Features Added + +* Added field `StatusCodes` to `runtime.FetcherForNextLinkOptions` allowing for additional HTTP status codes indicating success. +* Added func `NewUUID` to the `runtime` package for generating UUIDs. + +### Bugs Fixed + +* Fixed an issue that prevented pollers using the `Operation-Location` strategy from unmarshaling the final result in some cases. + +### Other Changes + +* Updated dependencies. + ## 1.11.1 (2024-04-02) ### Bugs Fixed diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go index 187fe82b97c..00f2d5a0ab9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go @@ -192,7 +192,7 @@ func appendNext(parent *ResourceID, parts []string, id string) (*ResourceID, err } if strings.EqualFold(parts[0], providersKey) && (len(parts) == 2 || strings.EqualFold(parts[2], providersKey)) { - //provider resource can only be on a tenant or a subscription parent + // provider resource can only be on a tenant or a subscription parent if parent.ResourceType.String() != SubscriptionResourceType.String() && parent.ResourceType.String() != TenantResourceType.String() { return nil, fmt.Errorf("invalid resource ID: %s", id) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go index 039b758bf98..6a7c916b43e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go @@ -34,18 +34,22 @@ func NewPipeline(module, version string, cred azcore.TokenCredential, plOpts azr InsecureAllowCredentialWithHTTP: options.InsecureAllowCredentialWithHTTP, Scopes: []string{conf.Audience + "/.default"}, }) + // we don't want to modify the underlying array in plOpts.PerRetry perRetry := make([]azpolicy.Policy, len(plOpts.PerRetry), len(plOpts.PerRetry)+1) copy(perRetry, plOpts.PerRetry) - plOpts.PerRetry = append(perRetry, authPolicy, exported.PolicyFunc(httpTraceNamespacePolicy)) + perRetry = append(perRetry, authPolicy, exported.PolicyFunc(httpTraceNamespacePolicy)) + plOpts.PerRetry = perRetry if !options.DisableRPRegistration { regRPOpts := armpolicy.RegistrationOptions{ClientOptions: options.ClientOptions} regPolicy, err := NewRPRegistrationPolicy(cred, ®RPOpts) if err != nil { return azruntime.Pipeline{}, err } + // we don't want to modify the underlying array in plOpts.PerCall perCall := make([]azpolicy.Policy, len(plOpts.PerCall), len(plOpts.PerCall)+1) copy(perCall, plOpts.PerCall) - plOpts.PerCall = append(perCall, regPolicy) + perCall = append(perCall, regPolicy) + plOpts.PerCall = perCall } if plOpts.APIVersion.Name == "" { plOpts.APIVersion.Name = "api-version" diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go index 3041984d9b1..e3e2d4e588a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go @@ -7,6 +7,7 @@ package exported import ( + "bytes" "context" "encoding/base64" "errors" @@ -67,6 +68,42 @@ func (ov opValues) get(value any) bool { return ok } +// NewRequestFromRequest creates a new policy.Request with an existing *http.Request +// Exported as runtime.NewRequestFromRequest(). +func NewRequestFromRequest(req *http.Request) (*Request, error) { + policyReq := &Request{req: req} + + if req.Body != nil { + // we can avoid a body copy here if the underlying stream is already a + // ReadSeekCloser. + readSeekCloser, isReadSeekCloser := req.Body.(io.ReadSeekCloser) + + if !isReadSeekCloser { + // since this is an already populated http.Request we want to copy + // over its body, if it has one. + bodyBytes, err := io.ReadAll(req.Body) + + if err != nil { + return nil, err + } + + if err := req.Body.Close(); err != nil { + return nil, err + } + + readSeekCloser = NopCloser(bytes.NewReader(bodyBytes)) + } + + // SetBody also takes care of updating the http.Request's body + // as well, so they should stay in-sync from this point. + if err := policyReq.SetBody(readSeekCloser, req.Header.Get("Content-Type")); err != nil { + return nil, err + } + } + + return policyReq, nil +} + // NewRequest creates a new Request with the specified input. // Exported as runtime.NewRequest(). func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go index ccd4794e9e9..a5346276056 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go @@ -155,5 +155,5 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error { p.resp = resp } - return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out) + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), "", out) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go index 0d781b31d0c..8751b05147f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go @@ -131,5 +131,5 @@ func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { } func (p *Poller[T]) Result(ctx context.Context, out *T) error { - return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out) + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), "", out) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go index 51aede8a2b8..7f8d11b8ba3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go @@ -124,7 +124,7 @@ func (p *Poller[T]) 
Result(ctx context.Context, out *T) error { return exported.NewResponseError(p.resp) } - return pollers.ResultHelper(p.resp, poller.Failed(p.FakeStatus), out) + return pollers.ResultHelper(p.resp, poller.Failed(p.FakeStatus), "", out) } // SanitizePollerPath removes any fake-appended suffix from a URL's path. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go index 7a56c5211b7..048285275df 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go @@ -119,5 +119,5 @@ func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { } func (p *Poller[T]) Result(ctx context.Context, out *T) error { - return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out) + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), "", out) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go index ac1c0efb5ac..03699fd7629 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go @@ -115,10 +115,13 @@ func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { func (p *Poller[T]) Result(ctx context.Context, out *T) error { var req *exported.Request var err error + + // when the payload is included with the status monitor on + // terminal success it's in the "result" JSON property + payloadPath := "result" + if p.FinalState == pollers.FinalStateViaLocation && p.LocURL != "" { req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL) - } else if p.FinalState == pollers.FinalStateViaOpLocation && p.Method == http.MethodPost { - // no final GET required, terminal response should have it } else if rl, rlErr := poller.GetResourceLocation(p.resp); rlErr != nil && !errors.Is(rlErr, poller.ErrNoBody) { return rlErr } else if rl != "" { @@ -134,6 +137,8 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error { // if a final GET request has been created, execute it if req != nil { + // no JSON path when making a final GET request + payloadPath = "" resp, err := p.pl.Do(req) if err != nil { return err @@ -141,5 +146,5 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error { p.resp = resp } - return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out) + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), payloadPath, out) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go index eb3cf651db0..6a7a32e0342 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go @@ -159,7 +159,7 @@ func PollHelper(ctx context.Context, endpoint string, pl azexported.Pipeline, up // ResultHelper processes the response as success or failure. // In the success case, it unmarshals the payload into either a new instance of T or out. // In the failure case, it creates an *azcore.Response error from the response. 
-func ResultHelper[T any](resp *http.Response, failed bool, out *T) error { +func ResultHelper[T any](resp *http.Response, failed bool, jsonPath string, out *T) error { // short-circuit the simple success case with no response body to unmarshal if resp.StatusCode == http.StatusNoContent { return nil @@ -176,6 +176,18 @@ func ResultHelper[T any](resp *http.Response, failed bool, out *T) error { if err != nil { return err } + + if jsonPath != "" && len(payload) > 0 { + // extract the payload from the specified JSON path. + // do this before the zero-length check in case there + // is no payload. + jsonBody := map[string]json.RawMessage{} + if err = json.Unmarshal(payload, &jsonBody); err != nil { + return err + } + payload = jsonBody[jsonPath] + } + if len(payload) == 0 { return nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go index 03691cbf024..e5b28a9b1ab 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -40,5 +40,5 @@ const ( Module = "azcore" // Version is the semantic version (see http://semver.org) of this module. - Version = "v1.11.1" + Version = "v1.13.0" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go index cffe692d7e3..b960cff0b2d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go @@ -94,6 +94,10 @@ type FetcherForNextLinkOptions struct { // NextReq is the func to be called when requesting subsequent pages. // Used for paged operations that have a custom next link operation. NextReq func(context.Context, string) (*policy.Request, error) + + // StatusCodes contains additional HTTP status codes indicating success. + // The default value is http.StatusOK. + StatusCodes []int } // FetcherForNextLink is a helper containing boilerplate code to simplify creating a PagingHandler[T].Fetcher from a next link URL. @@ -105,10 +109,13 @@ type FetcherForNextLinkOptions struct { func FetcherForNextLink(ctx context.Context, pl Pipeline, nextLink string, firstReq func(context.Context) (*policy.Request, error), options *FetcherForNextLinkOptions) (*http.Response, error) { var req *policy.Request var err error + if options == nil { + options = &FetcherForNextLinkOptions{} + } if nextLink == "" { req, err = firstReq(ctx) } else if nextLink, err = EncodeQueryParams(nextLink); err == nil { - if options != nil && options.NextReq != nil { + if options.NextReq != nil { req, err = options.NextReq(ctx, nextLink) } else { req, err = NewRequest(ctx, http.MethodGet, nextLink) @@ -121,7 +128,9 @@ func FetcherForNextLink(ctx context.Context, pl Pipeline, nextLink string, first if err != nil { return nil, err } - if !HasStatusCode(resp, http.StatusOK) { + successCodes := []int{http.StatusOK} + successCodes = append(successCodes, options.StatusCodes...) + if !HasStatusCode(resp, successCodes...) 
{ return nil, NewResponseError(resp) } return resp, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go index 06ac95b1b71..7d34b7803af 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go @@ -15,6 +15,7 @@ import ( "fmt" "io" "mime/multipart" + "net/http" "net/textproto" "net/url" "path" @@ -24,6 +25,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" ) // Base64Encoding is usesd to specify which base-64 encoder/decoder to use when @@ -44,6 +46,11 @@ func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*polic return exported.NewRequest(ctx, httpMethod, endpoint) } +// NewRequestFromRequest creates a new policy.Request with an existing *http.Request +func NewRequestFromRequest(req *http.Request) (*policy.Request, error) { + return exported.NewRequestFromRequest(req) +} + // EncodeQueryParams will parse and encode any query parameters in the specified URL. // Any semicolons will automatically be escaped. func EncodeQueryParams(u string) (string, error) { @@ -263,3 +270,12 @@ func SkipBodyDownload(req *policy.Request) { // CtxAPINameKey is used as a context key for adding/retrieving the API name. type CtxAPINameKey = shared.CtxAPINameKey + +// NewUUID returns a new UUID using the RFC4122 algorithm. +func NewUUID() (string, error) { + u, err := uuid.New() + if err != nil { + return "", err + } + return u.String(), nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/CHANGELOG.md index 796215fa484..988db23a29c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/CHANGELOG.md @@ -1,5 +1,11 @@ # Release History +## 1.7.1 (2024-05-20) + +### Bugs Fixed + +- Emulator strings should allow for hosts other than localhost (PR#22898) + ## 1.7.0 (2024-04-02) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/conn/conn.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/conn/conn.go index ce0e2610dfc..4d11f9f1882 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/conn/conn.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/conn/conn.go @@ -100,14 +100,13 @@ func ParseConnectionString(connStr string) (ConnectionStringProperties, error) { } if csp.Emulator { - // check that they're only connecting to localhost endpointParts := strings.SplitN(csp.Endpoint, ":", 3) // allow for a port, if it exists. - if len(endpointParts) < 2 || endpointParts[0] != "sb" || endpointParts[1] != "//localhost" { - // there should always be at least two parts "sb:" and "//localhost" + if len(endpointParts) < 2 || endpointParts[0] != "sb" { + // there should always be at least two parts "sb:" and "//" // with an optional 3rd piece that's the port "1111". 
// (we don't need to validate it's a valid host since it's been through url.Parse() above) - return ConnectionStringProperties{}, fmt.Errorf("UseDevelopmentEmulator=true can only be used with sb://localhost or sb://localhost:, not %s", csp.Endpoint) + return ConnectionStringProperties{}, fmt.Errorf("UseDevelopmentEmulator=true can only be used with sb:// or sb://:, not %s", csp.Endpoint) } } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/constants.go index a2402e48ac3..f3079017326 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/constants.go @@ -4,4 +4,4 @@ package internal // Version is the semantic version number -const Version = "v1.7.0" +const Version = "v1.7.1" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md index f6c99441ba9..324c8ed0e48 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md @@ -1,5 +1,20 @@ # Release History +## 1.4.0 (2024-07-18) + +### Other Changes +* GetProperties() was called twice in DownloadFile method. Enhanced to call it only once, reducing latency. +* Updated `azcore` version to `1.13.0` + +## 1.4.0-beta.1 (2024-06-14) + +### Features Added +* Updated service version to `2024-05-04`. + +### Other Changes +* Updated `azidentity` version to `1.6.0` +* Updated `azcore` version to `1.12.0` + ## 1.3.2 (2024-04-09) ### Bugs Fixed diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md index 1f51959fa3d..e71157a1979 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md @@ -1,4 +1,7 @@ # Azure Blob Storage module for Go +[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/azblob)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob) +[![Build Status](https://dev.azure.com/azure-sdk/public/_apis/build/status/go/go%20-%20azdatalake%20-%20ci?branchName=main)](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=2854&branchName=main) +[![Code Coverage](https://img.shields.io/azure-devops/coverage/azure-sdk/public/2854/main)](https://img.shields.io/azure-devops/coverage/azure-sdk/public/2854/main) > Service Version: 2023-11-03 diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go index 06b0fd419f0..3bf058976a2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go @@ -292,8 +292,8 @@ func (ab *Client) GetAccountInfo(ctx context.Context, o *blob.GetAccountInfoOpti // SetHTTPHeaders changes a blob's HTTP headers. // For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. 
-func (ab *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { - return ab.BlobClient().SetHTTPHeaders(ctx, HTTPHeaders, o) +func (ab *Client) SetHTTPHeaders(ctx context.Context, httpHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { + return ab.BlobClient().SetHTTPHeaders(ctx, httpHeaders, o) } // SetMetadata changes a blob's metadata. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json index d971ff1ec84..83e35475c90 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/storage/azblob", - "Tag": "go/storage/azblob_71b0a04c12" + "Tag": "go/storage/azblob_bbf7a929e3" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go index 7b55cd1431a..ec6907fbdc8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go @@ -185,9 +185,9 @@ func (b *Client) GetProperties(ctx context.Context, options *GetPropertiesOption // SetHTTPHeaders changes a blob's HTTP headers. // For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. -func (b *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders HTTPHeaders, o *SetHTTPHeadersOptions) (SetHTTPHeadersResponse, error) { +func (b *Client) SetHTTPHeaders(ctx context.Context, httpHeaders HTTPHeaders, o *SetHTTPHeadersOptions) (SetHTTPHeadersResponse, error) { opts, leaseAccessConditions, modifiedAccessConditions := o.format() - resp, err := b.generated().SetHTTPHeaders(ctx, opts, &HTTPHeaders, leaseAccessConditions, modifiedAccessConditions) + resp, err := b.generated().SetHTTPHeaders(ctx, opts, &httpHeaders, leaseAccessConditions, modifiedAccessConditions) return resp, err } @@ -448,6 +448,7 @@ func (b *Client) DownloadFile(ctx context.Context, file *os.File, o *DownloadFil return 0, err } size = *props.ContentLength - do.Range.Offset + do.Range.Count = size } else { size = count } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go index 1deedb5902e..a625c995327 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go @@ -101,7 +101,6 @@ func (s *RetryReader) setResponse(r io.ReadCloser) { // Read from retry reader func (s *RetryReader) Read(p []byte) (n int, err error) { for try := int32(0); ; try++ { - //fmt.Println(try) // Comment out for debugging. 
if s.countWasBounded && s.info.Range.Count == CountToEnd { // User specified an original count and the remaining bytes are 0, return 0, EOF return 0, io.EOF diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go index 8901f1dbd51..7a3ab3fe8dc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go @@ -364,8 +364,8 @@ func (bb *Client) GetAccountInfo(ctx context.Context, o *blob.GetAccountInfoOpti // SetHTTPHeaders changes a blob's HTTP headers. // For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. -func (bb *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { - return bb.BlobClient().SetHTTPHeaders(ctx, HTTPHeaders, o) +func (bb *Client) SetHTTPHeaders(ctx context.Context, httpHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { + return bb.BlobClient().SetHTTPHeaders(ctx, httpHeaders, o) } // SetMetadata changes a blob's metadata. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go index 720d6e8fdbd..b3059f6e1f7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go @@ -8,5 +8,5 @@ package exported const ( ModuleName = "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" - ModuleVersion = "v1.3.2" + ModuleVersion = "v1.4.0" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md index 92dc7e2d31e..9714432c2f8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md @@ -19,10 +19,10 @@ modelerfour: seal-single-value-enum-by-default: true lenient-model-deduplication: true export-clients: true -use: "@autorest/go@4.0.0-preview.61" +use: "@autorest/go@4.0.0-preview.65" ``` -### Updating service version to 2023-11-03 +### Updating service version to 2024-05-04 ```yaml directive: - from: @@ -36,7 +36,7 @@ directive: transform: >- return $. replaceAll(`[]string{"2021-12-02"}`, `[]string{ServiceVersion}`). - replaceAll(`2021-12-02`, `2023-11-03`); + replaceAll(`2021-12-02`, `2024-05-04`); ``` ### Undo breaking change with BlobName @@ -293,7 +293,7 @@ directive: replace(/SourceIfMatch\s+\*string/g, `SourceIfMatch *azcore.ETag`). replace(/SourceIfNoneMatch\s+\*string/g, `SourceIfNoneMatch *azcore.ETag`); -- from: zz_response_types.go +- from: zz_responses.go where: $ transform: >- return $. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go index 8f2bbbb7cb8..1c2a0368075 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go @@ -6,4 +6,4 @@ package generated -const ServiceVersion = "2023-11-03" +const ServiceVersion = "2024-05-04" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go index 797318611c3..8b24ee0898b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go @@ -1,6 +1,3 @@ -//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. @@ -72,54 +69,54 @@ func (client *AppendBlobClient) appendBlockCreateRequest(ctx context.Context, co reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} if options != nil && options.TransactionalContentMD5 != nil { req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} } - if options != nil && options.TransactionalContentCRC64 != nil { - req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } - if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil { - req.Raw().Header["x-ms-blob-condition-maxsize"] = []string{strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil { + 
req.Raw().Header["x-ms-blob-condition-maxsize"] = []string{strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} } if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if err := req.SetBody(body, "application/octet-stream"); err != nil { return nil, err } @@ -244,76 +241,76 @@ func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Cont reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-copy-source"] = []string{sourceURL} - if options != nil && options.SourceRange != nil { - req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange} - } - if options != nil && options.SourceContentMD5 != nil { - req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} - } - if options != nil && options.SourceContentcrc64 != nil { - req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} - } + req.Raw().Header["Accept"] = []string{"application/xml"} 
req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} if options != nil && options.TransactionalContentMD5 != nil { req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { + req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} } if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil { req.Raw().Header["x-ms-blob-condition-maxsize"] = []string{strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)} } - if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { - req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + req.Raw().Header["x-ms-copy-source"] = []string{sourceURL} + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = 
[]string{string(*modifiedAccessConditions.IfMatch)} + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + if options != nil && options.SourceContentcrc64 != nil { + req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} + } + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if options != nil && options.CopySourceAuthorization != nil { - req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + if options != nil && options.SourceRange != nil { + req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -424,10 +421,25 @@ func (client *AppendBlobClient) createCreateRequest(ctx context.Context, content 
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-blob-type"] = []string{"AppendBlob"} + req.Raw().Header["Accept"] = []string{"application/xml"} req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { - req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} } if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} @@ -438,21 +450,15 @@ func (client *AppendBlobClient) createCreateRequest(ctx context.Context, content if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { - req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} - } - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + req.Raw().Header["x-ms-blob-type"] = []string{"AppendBlob"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { - req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} @@ -460,44 +466,35 @@ func (client *AppendBlobClient) createCreateRequest(ctx context.Context, content if cpkInfo != nil && 
cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if options != nil && options.BlobTagsString != nil { - req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} } if options != nil && options.ImmutabilityPolicyExpiry != nil { req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} } - if options != nil && options.ImmutabilityPolicyMode != nil { - req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if options != nil && options.LegalHold != nil { req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -596,29 +593,29 @@ func (client *AppendBlobClient) sealCreateRequest(ctx context.Context, options * reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + 
req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go index fe568a96c7a..f30b81b970e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go @@ -1,6 +1,3 @@ -//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. 
@@ -67,15 +64,15 @@ func (client *BlobClient) abortCopyFromURLCreateRequest(ctx context.Context, cop reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } req.Raw().Header["x-ms-copy-action"] = []string{"abort"} if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -140,31 +137,31 @@ func (client *BlobClient) acquireLeaseCreateRequest(ctx context.Context, duratio reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"acquire"} - req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(duration), 10)} - if options != nil && options.ProposedLeaseID != nil { - req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + req.Raw().Header["x-ms-lease-action"] = []string{"acquire"} + req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(duration), 10)} + if options != nil && options.ProposedLeaseID != nil { + req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -239,30 +236,30 @@ func (client *BlobClient) 
breakLeaseCreateRequest(ctx context.Context, options * reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"break"} - if options != nil && options.BreakPeriod != nil { - req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)} + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + req.Raw().Header["x-ms-lease-action"] = []string{"break"} + if options != nil && options.BreakPeriod != nil { + req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -346,29 +343,29 @@ func (client *BlobClient) changeLeaseCreateRequest(ctx context.Context, leaseID reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"change"} - req.Raw().Header["x-ms-lease-id"] = []string{leaseID} - req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID} + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { 
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } + req.Raw().Header["x-ms-lease-action"] = []string{"change"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID} req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -450,77 +447,77 @@ func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySour reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-requires-sync"] = []string{"true"} - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } - } - if options != nil && options.Tier != nil { - req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { - req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { - req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = 
[]string{string(*modifiedAccessConditions.IfMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-copy-source"] = []string{copySource} - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - if options != nil && options.SourceContentMD5 != nil { - req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + req.Raw().Header["x-ms-copy-source"] = []string{copySource} + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} } - if options != nil && options.BlobTagsString != nil { - req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + if options != nil && options.CopySourceTags != nil { + req.Raw().Header["x-ms-copy-source-tag-option"] = []string{string(*options.CopySourceTags)} } - if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } if options != nil && options.ImmutabilityPolicyMode != nil { req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } if options != nil && options.LegalHold != nil { req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} } - if options != nil && options.CopySourceAuthorization != nil { - req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + req.Raw().Header["x-ms-requires-sync"] = []string{"true"} + if options != 
nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} } - if options != nil && options.CopySourceTags != nil { - req.Raw().Header["x-ms-copy-source-tag-option"] = []string{string(*options.CopySourceTags)} + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -621,12 +618,24 @@ func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, optio reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} @@ -634,35 +643,23 @@ func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, optio if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = 
[]string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -753,45 +750,45 @@ func (client *BlobClient) deleteCreateRequest(ctx context.Context, options *Blob return nil, err } reqQP := req.Raw().URL.Query() + if options != nil && options.DeleteType != nil { + reqQP.Set("deletetype", string(*options.DeleteType)) + } if options != nil && options.Snapshot != nil { reqQP.Set("snapshot", *options.Snapshot) } - if options != nil && options.VersionID != nil { - reqQP.Set("versionid", *options.VersionID) - } if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - if options != nil && options.DeleteType != nil { - reqQP.Set("deletetype", string(*options.DeleteType)) + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) } req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if options != nil && options.DeleteSnapshots != nil { - req.Raw().Header["x-ms-delete-snapshots"] = []string{string(*options.DeleteSnapshots)} + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } if 
modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if options != nil && options.DeleteSnapshots != nil { + req.Raw().Header["x-ms-delete-snapshots"] = []string{string(*options.DeleteSnapshots)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -853,11 +850,11 @@ func (client *BlobClient) deleteImmutabilityPolicyCreateRequest(ctx context.Cont reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -920,25 +917,32 @@ func (client *BlobClient) downloadCreateRequest(ctx context.Context, options *Bl if options != nil && options.Snapshot != nil { reqQP.Set("snapshot", *options.Snapshot) } - if options != nil && options.VersionID != nil { - reqQP.Set("versionid", *options.VersionID) - } if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) + } req.Raw().URL.RawQuery = reqQP.Encode() runtime.SkipBodyDownload(req) - if options != nil && options.Range != nil { - req.Raw().Header["x-ms-range"] = []string{*options.Range} + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } - if options != nil && options.RangeGetContentMD5 != nil { - req.Raw().Header["x-ms-range-get-content-md5"] = []string{strconv.FormatBool(*options.RangeGetContentMD5)} + if 
modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } - if options != nil && options.RangeGetContentCRC64 != nil { - req.Raw().Header["x-ms-range-get-content-crc64"] = []string{strconv.FormatBool(*options.RangeGetContentCRC64)} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} @@ -946,29 +950,22 @@ func (client *BlobClient) downloadCreateRequest(ctx context.Context, options *Bl if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if options != nil && options.RangeGetContentCRC64 != nil { + req.Raw().Header["x-ms-range-get-content-crc64"] = []string{strconv.FormatBool(*options.RangeGetContentCRC64)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + if options != nil && options.RangeGetContentMD5 != nil { + req.Raw().Header["x-ms-range-get-content-md5"] = []string{strconv.FormatBool(*options.RangeGetContentMD5)} } req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -1167,10 +1164,10 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien } for hh := range resp.Header { if len(hh) > len("x-ms-or-") 
&& strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { - if result.Metadata == nil { - result.Metadata = map[string]*string{} + if result.ObjectReplicationRules == nil { + result.ObjectReplicationRules = map[string]*string{} } - result.Metadata[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh)) + result.ObjectReplicationRules[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh)) } } if val := resp.Header.Get("x-ms-request-id"); val != "" { @@ -1222,11 +1219,11 @@ func (client *BlobClient) getAccountInfoCreateRequest(ctx context.Context, optio return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "account") reqQP.Set("comp", "properties") + reqQP.Set("restype", "account") req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -1295,45 +1292,45 @@ func (client *BlobClient) getPropertiesCreateRequest(ctx context.Context, option if options != nil && options.Snapshot != nil { reqQP.Set("snapshot", *options.Snapshot) } - if options != nil && options.VersionID != nil { - reqQP.Set("versionid", *options.VersionID) - } if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + 
req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -1549,10 +1546,10 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob } for hh := range resp.Header { if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { - if result.Metadata == nil { - result.Metadata = map[string]*string{} + if result.ObjectReplicationRules == nil { + result.ObjectReplicationRules = map[string]*string{} } - result.Metadata[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh)) + result.ObjectReplicationRules[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh)) } } if val := resp.Header.Get("x-ms-rehydrate-priority"); val != "" { @@ -1610,17 +1607,17 @@ func (client *BlobClient) getTagsCreateRequest(ctx context.Context, options *Blo } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "tags") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } if options != nil && options.Snapshot != nil { reqQP.Set("snapshot", *options.Snapshot) } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } if options != nil && options.VersionID != nil { reqQP.Set("versionid", *options.VersionID) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1630,7 +1627,7 @@ func (client *BlobClient) getTagsCreateRequest(ctx context.Context, options *Blo if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -1701,38 +1698,38 @@ func (client *BlobClient) queryCreateRequest(ctx context.Context, options *BlobC } req.Raw().URL.RawQuery = reqQP.Encode() runtime.SkipBodyDownload(req) - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} - } - if 
cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.QueryRequest != nil { if err := runtime.MarshalAsXML(req, *options.QueryRequest); err != nil { return nil, err @@ -1930,28 +1927,28 @@ func (client *BlobClient) releaseLeaseCreateRequest(ctx context.Context, leaseID reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"release"} - req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + 
req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } + req.Raw().Header["x-ms-lease-action"] = []string{"release"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -2024,28 +2021,28 @@ func (client *BlobClient) renewLeaseCreateRequest(ctx context.Context, leaseID s reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"renew"} - req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } + req.Raw().Header["x-ms-lease-action"] = []string{"renew"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -2120,7 
+2117,7 @@ func (client *BlobClient) setExpiryCreateRequest(ctx context.Context, expiryOpti reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -2128,7 +2125,7 @@ func (client *BlobClient) setExpiryCreateRequest(ctx context.Context, expiryOpti if options != nil && options.ExpiresOn != nil { req.Raw().Header["x-ms-expiry-time"] = []string{*options.ExpiresOn} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -2202,14 +2199,24 @@ func (client *BlobClient) setHTTPHeadersCreateRequest(ctx context.Context, optio reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { - req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { - req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} } if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} @@ -2217,32 +2224,22 @@ func (client *BlobClient) setHTTPHeadersCreateRequest(ctx context.Context, optio if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = 
[]string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { - req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -2322,20 +2319,20 @@ func (client *BlobClient) setImmutabilityPolicyCreateRequest(ctx context.Context reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } + req.Raw().Header["Accept"] = []string{"application/xml"} if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if options != nil && options.ImmutabilityPolicyMode != nil { req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -2407,12 +2404,12 @@ func (client *BlobClient) setLegalHoldCreateRequest(ctx context.Context, legalHo reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options 
!= nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(legalHold)} - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -2485,15 +2482,24 @@ func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} @@ -2501,32 +2507,23 @@ func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } if modifiedAccessConditions != nil && 
modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -2619,23 +2616,23 @@ func (client *BlobClient) setTagsCreateRequest(ctx context.Context, tags BlobTag reqQP.Set("versionid", *options.VersionID) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.TransactionalContentMD5 != nil { req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} } - if options != nil && options.TransactionalContentCRC64 != nil { - req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} - } if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if err := runtime.MarshalAsXML(req, tags); err != nil { return nil, err } @@ -2704,28 +2701,28 @@ func (client *BlobClient) setTierCreateRequest(ctx context.Context, tier AccessT if options != nil && options.Snapshot != nil { reqQP.Set("snapshot", *options.Snapshot) } - if options != nil && options.VersionID != nil { - reqQP.Set("versionid", *options.VersionID) - } if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) + } req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} req.Raw().Header["x-ms-access-tier"] = []string{string(tier)} - if options != nil && options.RehydratePriority != nil { - req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { 
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + if options != nil && options.RehydratePriority != nil { + req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -2785,6 +2782,41 @@ func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, cop reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-copy-source"] = []string{copySource} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if options != nil && options.LegalHold != nil { + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} + } if options != nil && options.Metadata != nil { for k, v := range options.Metadata { if v != nil { @@ -2792,66 +2824,31 @@ func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, cop } } } - if options != nil && options.Tier != nil { - req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} - } if options != nil && options.RehydratePriority != nil { req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)} } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if sourceModifiedAccessConditions != nil && 
sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + if options != nil && options.SealBlob != nil { + req.Raw().Header["x-ms-seal-blob"] = []string{strconv.FormatBool(*options.SealBlob)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil { req.Raw().Header["x-ms-source-if-tags"] = []string{*sourceModifiedAccessConditions.SourceIfTags} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} - } - req.Raw().Header["x-ms-copy-source"] = []string{copySource} - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if options != nil && options.BlobTagsString != nil { req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} } - if options != nil && options.SealBlob != nil { - req.Raw().Header["x-ms-seal-blob"] = []string{strconv.FormatBool(*options.SealBlob)} - } - if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} - } - if options != nil && options.ImmutabilityPolicyMode != nil { - req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} - } - if options != nil && options.LegalHold 
!= nil { - req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} - } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -2931,11 +2928,11 @@ func (client *BlobClient) undeleteCreateRequest(ctx context.Context, options *Bl reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go index b6115b50a65..fe4909d73c5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go @@ -1,6 +1,3 @@ -//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. @@ -75,11 +72,30 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { - req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} } if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil 
{ req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} @@ -90,24 +106,17 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} } - if options != nil && options.TransactionalContentMD5 != nil { - req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if options != nil && options.TransactionalContentCRC64 != nil { req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} } - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { - req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} @@ -115,47 +124,35 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } - if options != nil && options.Tier != nil { - req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if 
options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if options != nil && options.BlobTagsString != nil { - req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} } if options != nil && options.ImmutabilityPolicyExpiry != nil { req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} } - if options != nil && options.ImmutabilityPolicyMode != nil { - req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if options != nil && options.LegalHold != nil { req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if err := runtime.MarshalAsXML(req, blocks); err != nil { return nil, err } @@ -257,26 +254,26 @@ func (client *BlockBlobClient) getBlockListCreateRequest(ctx context.Context, li return nil, err } reqQP := req.Raw().URL.Query() + reqQP.Set("blocklisttype", string(listType)) reqQP.Set("comp", "blocklist") if options != nil && options.Snapshot != nil { reqQP.Set("snapshot", *options.Snapshot) } - reqQP.Set("blocklisttype", string(listType)) if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -375,13 +372,31 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-blob-type"] = []string{"BlockBlob"} + req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} if options != nil && options.TransactionalContentMD5 != nil 
{ req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} } - req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { - req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} } if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} @@ -392,21 +407,25 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { - req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} } - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } + req.Raw().Header["x-ms-blob-type"] = []string{"BlockBlob"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + req.Raw().Header["x-ms-copy-source"] = []string{copySource} + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { - req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + if options != nil && options.CopySourceBlobProperties != nil { + req.Raw().Header["x-ms-copy-source-blob-properties"] = []string{strconv.FormatBool(*options.CopySourceBlobProperties)} + } + if options != nil 
&& options.CopySourceTags != nil { + req.Raw().Header["x-ms-copy-source-tag-option"] = []string{string(*options.CopySourceTags)} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} @@ -414,66 +433,44 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } - if options != nil && options.Tier != nil { - req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + } if 
sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil { req.Raw().Header["x-ms-source-if-tags"] = []string{*sourceModifiedAccessConditions.SourceIfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if options != nil && options.SourceContentMD5 != nil { - req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if options != nil && options.BlobTagsString != nil { req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} } - req.Raw().Header["x-ms-copy-source"] = []string{copySource} - if options != nil && options.CopySourceBlobProperties != nil { - req.Raw().Header["x-ms-copy-source-blob-properties"] = []string{strconv.FormatBool(*options.CopySourceBlobProperties)} - } - if options != nil && options.CopySourceAuthorization != nil { - req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} - } - if options != nil && options.CopySourceTags != nil { - req.Raw().Header["x-ms-copy-source-tag-option"] = []string{string(*options.CopySourceTags)} - } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -570,21 +567,25 @@ func (client *BlockBlobClient) stageBlockCreateRequest(ctx context.Context, bloc return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "block") reqQP.Set("blockid", blockID) + reqQP.Set("comp", "block") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} if options != nil && options.TransactionalContentMD5 != nil { req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } if options != nil && options.TransactionalContentCRC64 != nil { req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} @@ -592,17 +593,13 @@ func (client *BlockBlobClient) stageBlockCreateRequest(ctx context.Context, bloc if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { 
req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if err := req.SetBody(body, "application/octet-stream"); err != nil { return nil, err } @@ -700,22 +697,23 @@ func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "block") reqQP.Set("blockid", blockID) + reqQP.Set("comp", "block") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} - req.Raw().Header["x-ms-copy-source"] = []string{sourceURL} - if options != nil && options.SourceRange != nil { - req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - if options != nil && options.SourceContentMD5 != nil { - req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + req.Raw().Header["x-ms-copy-source"] = []string{sourceURL} + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} } - if options != nil && options.SourceContentcrc64 != nil { - req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} @@ -723,35 +721,34 @@ func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Contex if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = 
[]string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + if options != nil && options.SourceContentcrc64 != nil { + req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if options != nil && options.CopySourceAuthorization != nil { - req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + if options != nil && options.SourceRange != nil { + req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -848,13 +845,31 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-blob-type"] = []string{"BlockBlob"} + req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} if options != nil && options.TransactionalContentMD5 != nil { req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} } - req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { - req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != 
nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} } if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} @@ -865,21 +880,18 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { - req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} } - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } + req.Raw().Header["x-ms-blob-type"] = []string{"BlockBlob"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { - req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} @@ -887,50 +899,35 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } - if options != nil && options.Tier != nil { - req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} - } - if modifiedAccessConditions != nil && 
modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if options != nil && options.BlobTagsString != nil { - req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} } if options != nil && options.ImmutabilityPolicyExpiry != nil { req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} } - if options != nil && options.ImmutabilityPolicyMode != nil { - req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if options != nil && options.LegalHold != nil { req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} } - if options != nil && options.TransactionalContentCRC64 != nil { - req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } } - req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if err := req.SetBody(body, "application/octet-stream"); err != nil { return nil, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go index 95af9e15447..768182775f9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go @@ -1,6 +1,3 @@ -//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go
index dbc2a293ec6..39958d95801 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go
@@ -1,6 +1,3 @@
-//go:build go1.18
-// +build go1.18
-
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License. See License.txt in the project root for license information.
 // Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
@@ -71,22 +68,22 @@ func (client *ContainerClient) acquireLeaseCreateRequest(ctx context.Context, du
 		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
 	}
 	req.Raw().URL.RawQuery = reqQP.Encode()
-	req.Raw().Header["x-ms-lease-action"] = []string{"acquire"}
-	req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(duration), 10)}
-	if options != nil && options.ProposedLeaseID != nil {
-		req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID}
-	}
+	req.Raw().Header["Accept"] = []string{"application/xml"}
 	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
 		req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
 	}
 	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
 		req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
 	}
-	req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
 	if options != nil && options.RequestID != nil {
 		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
 	}
-	req.Raw().Header["Accept"] = []string{"application/xml"}
+	req.Raw().Header["x-ms-lease-action"] = []string{"acquire"}
+	req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(duration), 10)}
+	if options != nil && options.ProposedLeaseID != nil {
+		req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID}
+	}
+	req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
 	return req, nil
 }
 
@@ -163,21 +160,21 @@ func (client *ContainerClient) breakLeaseCreateRequest(ctx context.Context, opti
 		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
 	}
 	req.Raw().URL.RawQuery = reqQP.Encode()
-	req.Raw().Header["x-ms-lease-action"] = []string{"break"}
-	if options != nil && options.BreakPeriod != nil {
-		req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)}
-	}
+	req.Raw().Header["Accept"] = []string{"application/xml"}
 	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
 		req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
 	}
 	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
 		req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
 	}
-	req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
 	if options != nil && options.RequestID != nil {
 		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
 	}
-	req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-lease-action"] = []string{"break"} + if options != nil && options.BreakPeriod != nil { + req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -263,20 +260,20 @@ func (client *ContainerClient) changeLeaseCreateRequest(ctx context.Context, lea reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"change"} - req.Raw().Header["x-ms-lease-id"] = []string{leaseID} - req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID} + req.Raw().Header["Accept"] = []string{"application/xml"} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-lease-action"] = []string{"change"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -352,17 +349,10 @@ func (client *ContainerClient) createCreateRequest(ctx context.Context, options reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } - } + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.Access != nil { req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -372,7 +362,14 @@ func (client *ContainerClient) createCreateRequest(ctx context.Context, options if containerCPKScopeInfo != nil && containerCPKScopeInfo.PreventEncryptionScopeOverride != nil { req.Raw().Header["x-ms-deny-encryption-scope-override"] = []string{strconv.FormatBool(*containerCPKScopeInfo.PreventEncryptionScopeOverride)} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -446,20 +443,20 @@ func (client *ContainerClient) deleteCreateRequest(ctx context.Context, options reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } + req.Raw().Header["Accept"] = []string{"application/xml"} if 
modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -517,27 +514,27 @@ func (client *ContainerClient) filterBlobsCreateRequest(ctx context.Context, whe return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") reqQP.Set("comp", "blobs") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) } - reqQP.Set("where", where) if options != nil && options.Marker != nil { reqQP.Set("marker", *options.Marker) } if options != nil && options.Maxresults != nil { reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) } - if options != nil && options.Include != nil { - reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } + reqQP.Set("where", where) req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -599,20 +596,20 @@ func (client *ContainerClient) getAccessPolicyCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") reqQP.Set("comp", "acl") + reqQP.Set("restype", "container") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -685,11 +682,11 @@ func (client *ContainerClient) getAccountInfoCreateRequest(ctx context.Context, return nil, err } reqQP := 
req.Raw().URL.Query() - reqQP.Set("restype", "account") reqQP.Set("comp", "properties") + reqQP.Set("restype", "account") req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -758,14 +755,14 @@ func (client *ContainerClient) getPropertiesCreateRequest(ctx context.Context, o reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -865,10 +862,9 @@ func (client *ContainerClient) ListBlobFlatSegmentCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") reqQP.Set("comp", "list") - if options != nil && options.Prefix != nil { - reqQP.Set("prefix", *options.Prefix) + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) } if options != nil && options.Marker != nil { reqQP.Set("marker", *options.Marker) @@ -876,18 +872,19 @@ func (client *ContainerClient) ListBlobFlatSegmentCreateRequest(ctx context.Cont if options != nil && options.Maxresults != nil { reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) } - if options != nil && options.Include != nil { - reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + if options != nil && options.Prefix != nil { + reqQP.Set("prefix", *options.Prefix) } + reqQP.Set("restype", "container") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -955,30 +952,30 @@ func (client *ContainerClient) ListBlobHierarchySegmentCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") reqQP.Set("comp", "list") - if options != nil && options.Prefix != nil { - reqQP.Set("prefix", *options.Prefix) - } reqQP.Set("delimiter", delimiter) + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + } if options != nil && options.Marker != nil { reqQP.Set("marker", *options.Marker) } if options != nil && options.Maxresults != nil { reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) } - if options != nil && options.Include != nil { - reqQP.Set("include", 
strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + if options != nil && options.Prefix != nil { + reqQP.Set("prefix", *options.Prefix) } + reqQP.Set("restype", "container") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -1049,19 +1046,19 @@ func (client *ContainerClient) releaseLeaseCreateRequest(ctx context.Context, le reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"release"} - req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["Accept"] = []string{"application/xml"} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-lease-action"] = []string{"release"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -1128,13 +1125,13 @@ func (client *ContainerClient) renameCreateRequest(ctx context.Context, sourceCo return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") reqQP.Set("comp", "rename") + reqQP.Set("restype", "container") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1142,7 +1139,7 @@ func (client *ContainerClient) renameCreateRequest(ctx context.Context, sourceCo if options != nil && options.SourceLeaseID != nil { req.Raw().Header["x-ms-source-lease-id"] = []string{*options.SourceLeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -1207,19 +1204,19 @@ func (client *ContainerClient) renewLeaseCreateRequest(ctx context.Context, leas reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"renew"} - req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["Accept"] = []string{"application/xml"} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { 
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-lease-action"] = []string{"renew"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -1288,13 +1285,13 @@ func (client *ContainerClient) restoreCreateRequest(ctx context.Context, options return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") reqQP.Set("comp", "undelete") + reqQP.Set("restype", "container") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1304,7 +1301,7 @@ func (client *ContainerClient) restoreCreateRequest(ctx context.Context, options if options != nil && options.DeletedContainerVersion != nil { req.Raw().Header["x-ms-deleted-container-version"] = []string{*options.DeletedContainerVersion} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -1365,29 +1362,29 @@ func (client *ContainerClient) setAccessPolicyCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") reqQP.Set("comp", "acl") + reqQP.Set("restype", "container") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if options != nil && options.Access != nil { - req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)} - } + req.Raw().Header["Accept"] = []string{"application/xml"} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.Access != nil { + req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)} + } if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = 
[]string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} type wrapper struct { XMLName xml.Name `xml:"SignedIdentifiers"` ContainerACL *[]*SignedIdentifier `xml:"SignedIdentifier"` @@ -1462,12 +1459,19 @@ func (client *ContainerClient) setMetadataCreateRequest(ctx context.Context, opt return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") reqQP.Set("comp", "metadata") + reqQP.Set("restype", "container") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } @@ -1478,14 +1482,7 @@ func (client *ContainerClient) setMetadataCreateRequest(ctx context.Context, opt } } } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -1555,20 +1552,20 @@ func (client *ContainerClient) submitBatchCreateRequest(ctx context.Context, con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") reqQP.Set("comp", "batch") + reqQP.Set("restype", "container") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() runtime.SkipBodyDownload(req) + req.Raw().Header["Accept"] = []string{"application/xml"} req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} req.Raw().Header["Content-Type"] = []string{multipartContentType} - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if err := req.SetBody(body, multipartContentType); err != nil { return nil, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go index 7251de83952..72e2ab2d8ae 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go @@ -1,6 +1,3 @@ -//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. 
DO NOT EDIT.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go
index 7e094db8730..e2e64d6fffe 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go
@@ -1,6 +1,3 @@
-//go:build go1.18
-// +build go1.18
-
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License. See License.txt in the project root for license information.
 // Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
@@ -46,8 +43,12 @@ func (a *AccessPolicy) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) er
 	if err := dec.DecodeElement(aux, &start); err != nil {
 		return err
 	}
-	a.Expiry = (*time.Time)(aux.Expiry)
-	a.Start = (*time.Time)(aux.Start)
+	if aux.Expiry != nil && !(*time.Time)(aux.Expiry).IsZero() {
+		a.Expiry = (*time.Time)(aux.Expiry)
+	}
+	if aux.Start != nil && !(*time.Time)(aux.Start).IsZero() {
+		a.Start = (*time.Time)(aux.Start)
+	}
 	return nil
 }
 
@@ -152,19 +153,35 @@ func (b *BlobProperties) UnmarshalXML(dec *xml.Decoder, start xml.StartElement)
 	if err := dec.DecodeElement(aux, &start); err != nil {
 		return err
 	}
-	b.AccessTierChangeTime = (*time.Time)(aux.AccessTierChangeTime)
+	if aux.AccessTierChangeTime != nil && !(*time.Time)(aux.AccessTierChangeTime).IsZero() {
+		b.AccessTierChangeTime = (*time.Time)(aux.AccessTierChangeTime)
+	}
 	if aux.ContentMD5 != nil {
 		if err := runtime.DecodeByteArray(*aux.ContentMD5, &b.ContentMD5, runtime.Base64StdFormat); err != nil {
 			return err
 		}
 	}
-	b.CopyCompletionTime = (*time.Time)(aux.CopyCompletionTime)
-	b.CreationTime = (*time.Time)(aux.CreationTime)
-	b.DeletedTime = (*time.Time)(aux.DeletedTime)
-	b.ExpiresOn = (*time.Time)(aux.ExpiresOn)
-	b.ImmutabilityPolicyExpiresOn = (*time.Time)(aux.ImmutabilityPolicyExpiresOn)
-	b.LastAccessedOn = (*time.Time)(aux.LastAccessedOn)
-	b.LastModified = (*time.Time)(aux.LastModified)
+	if aux.CopyCompletionTime != nil && !(*time.Time)(aux.CopyCompletionTime).IsZero() {
+		b.CopyCompletionTime = (*time.Time)(aux.CopyCompletionTime)
+	}
+	if aux.CreationTime != nil && !(*time.Time)(aux.CreationTime).IsZero() {
+		b.CreationTime = (*time.Time)(aux.CreationTime)
+	}
+	if aux.DeletedTime != nil && !(*time.Time)(aux.DeletedTime).IsZero() {
+		b.DeletedTime = (*time.Time)(aux.DeletedTime)
+	}
+	if aux.ExpiresOn != nil && !(*time.Time)(aux.ExpiresOn).IsZero() {
+		b.ExpiresOn = (*time.Time)(aux.ExpiresOn)
+	}
+	if aux.ImmutabilityPolicyExpiresOn != nil && !(*time.Time)(aux.ImmutabilityPolicyExpiresOn).IsZero() {
+		b.ImmutabilityPolicyExpiresOn = (*time.Time)(aux.ImmutabilityPolicyExpiresOn)
+	}
+	if aux.LastAccessedOn != nil && !(*time.Time)(aux.LastAccessedOn).IsZero() {
+		b.LastAccessedOn = (*time.Time)(aux.LastAccessedOn)
+	}
+	if aux.LastModified != nil && !(*time.Time)(aux.LastModified).IsZero() {
+		b.LastModified = (*time.Time)(aux.LastModified)
+	}
 	return nil
 }
 
@@ -271,8 +288,12 @@ func (c *ContainerProperties) UnmarshalXML(dec *xml.Decoder, start xml.StartElem
 	if err := dec.DecodeElement(aux, &start); err != nil {
 		return err
 	}
-	c.DeletedTime = (*time.Time)(aux.DeletedTime)
-	c.LastModified = (*time.Time)(aux.LastModified)
+	if aux.DeletedTime != nil && !(*time.Time)(aux.DeletedTime).IsZero() {
+		c.DeletedTime = (*time.Time)(aux.DeletedTime)
+	}
+	if aux.LastModified != nil && !(*time.Time)(aux.LastModified).IsZero() {
+		c.LastModified = (*time.Time)(aux.LastModified)
+	}
 	return nil
 }
 
@@ -316,7 +337,9 @@ func (g *GeoReplication) UnmarshalXML(dec *xml.Decoder, start xml.StartElement)
 	if err := dec.DecodeElement(aux, &start); err != nil {
 		return err
 	}
-	g.LastSyncTime = (*time.Time)(aux.LastSyncTime)
+	if aux.LastSyncTime != nil && !(*time.Time)(aux.LastSyncTime).IsZero() {
+		g.LastSyncTime = (*time.Time)(aux.LastSyncTime)
+	}
 	return nil
 }
 
@@ -436,8 +459,12 @@ func (u *UserDelegationKey) UnmarshalXML(dec *xml.Decoder, start xml.StartElemen
 	if err := dec.DecodeElement(aux, &start); err != nil {
 		return err
 	}
-	u.SignedExpiry = (*time.Time)(aux.SignedExpiry)
-	u.SignedStart = (*time.Time)(aux.SignedStart)
+	if aux.SignedExpiry != nil && !(*time.Time)(aux.SignedExpiry).IsZero() {
+		u.SignedExpiry = (*time.Time)(aux.SignedExpiry)
+	}
+	if aux.SignedStart != nil && !(*time.Time)(aux.SignedStart).IsZero() {
+		u.SignedStart = (*time.Time)(aux.SignedStart)
+	}
 	return nil
 }
 
@@ -451,18 +478,8 @@ func populate(m map[string]any, k string, v any) {
 	}
 }
 
-func populateAny(m map[string]any, k string, v any) {
-	if v == nil {
-		return
-	} else if azcore.IsNullValue(v) {
-		m[k] = nil
-	} else {
-		m[k] = v
-	}
-}
-
 func unpopulate(data json.RawMessage, fn string, v any) error {
-	if data == nil {
+	if data == nil || string(data) == "null" {
 		return nil
 	}
 	if err := json.Unmarshal(data, v); err != nil {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go
index 216f8b73ae9..35a67a2791a 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go
@@ -1,6 +1,3 @@
-//go:build go1.18
-// +build go1.18
-
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License. See License.txt in the project root for license information.
 // Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go
index cb6a19f7a33..dd040e48a2f 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go
@@ -1,6 +1,3 @@
-//go:build go1.18
-// +build go1.18
-
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License. See License.txt in the project root for license information.
 // Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
@@ -69,13 +66,25 @@ func (client *PageBlobClient) clearPagesCreateRequest(ctx context.Context, conte reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-page-write"] = []string{"clear"} + req.Raw().Header["Accept"] = []string{"application/xml"} req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} - if options != nil && options.Range != nil { - req.Raw().Header["x-ms-range"] = []string{*options.Range} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} @@ -83,41 +92,29 @@ func (client *PageBlobClient) clearPagesCreateRequest(ctx context.Context, conte if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} + } if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { req.Raw().Header["x-ms-if-sequence-number-le"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)} } if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { req.Raw().Header["x-ms-if-sequence-number-lt"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)} } - if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { - req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince 
!= nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-page-write"] = []string{"clear"} + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -218,27 +215,27 @@ func (client *PageBlobClient) copyIncrementalCreateRequest(ctx context.Context, reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } + req.Raw().Header["x-ms-copy-source"] = []string{copySource} if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-copy-source"] = []string{copySource} req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - 
req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -322,13 +319,28 @@ func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLe reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-blob-type"] = []string{"PageBlob"} + req.Raw().Header["Accept"] = []string{"application/xml"} req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } if options != nil && options.Tier != nil { req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { - req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} } if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} @@ -336,24 +348,22 @@ func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLe if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} } + req.Raw().Header["x-ms-blob-content-length"] = []string{strconv.FormatInt(blobContentLength, 10)} if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { - req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} } - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } + if options != nil && options.BlobSequenceNumber != nil { + req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)} } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + req.Raw().Header["x-ms-blob-type"] = []string{"PageBlob"} + if options != 
nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { - req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} @@ -361,48 +371,35 @@ func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLe if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-blob-content-length"] = []string{strconv.FormatInt(blobContentLength, 10)} - if options != nil && options.BlobSequenceNumber != nil { - req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if options != nil && options.BlobTagsString != nil { - req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} } if options != nil && options.ImmutabilityPolicyExpiry != nil { req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} } - if options != nil && options.ImmutabilityPolicyMode != nil { - req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if options != nil && options.LegalHold != nil { req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} } - req.Raw().Header["Accept"] = 
[]string{"application/xml"} + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -498,45 +495,45 @@ func (client *PageBlobClient) GetPageRangesCreateRequest(ctx context.Context, op } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "pagelist") - if options != nil && options.Snapshot != nil { - reqQP.Set("snapshot", *options.Snapshot) - } - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } if options != nil && options.Marker != nil { reqQP.Set("marker", *options.Marker) } if options != nil && options.Maxresults != nil { reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() - if options != nil && options.Range != nil { - req.Raw().Header["x-ms-range"] = []string{*options.Range} + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } 
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -619,51 +616,51 @@ func (client *PageBlobClient) GetPageRangesDiffCreateRequest(ctx context.Context } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "pagelist") - if options != nil && options.Snapshot != nil { - reqQP.Set("snapshot", *options.Snapshot) - } - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - if options != nil && options.Prevsnapshot != nil { - reqQP.Set("prevsnapshot", *options.Prevsnapshot) - } if options != nil && options.Marker != nil { reqQP.Set("marker", *options.Marker) } if options != nil && options.Maxresults != nil { reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() - if options != nil && options.PrevSnapshotURL != nil { - req.Raw().Header["x-ms-previous-snapshot-url"] = []string{*options.PrevSnapshotURL} + if options != nil && options.Prevsnapshot != nil { + reqQP.Set("prevsnapshot", *options.Prevsnapshot) } - if options != nil && options.Range != nil { - req.Raw().Header["x-ms-range"] = []string{*options.Range} + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.PrevSnapshotURL != nil { + 
req.Raw().Header["x-ms-previous-snapshot-url"] = []string{*options.PrevSnapshotURL} + } + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -750,8 +747,25 @@ func (client *PageBlobClient) resizeCreateRequest(ctx context.Context, blobConte reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + req.Raw().Header["x-ms-blob-content-length"] = []string{strconv.FormatInt(blobContentLength, 10)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} @@ -759,33 +773,16 @@ func (client *PageBlobClient) resizeCreateRequest(ctx context.Context, blobConte if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - 
req.Raw().Header["x-ms-blob-content-length"] = []string{strconv.FormatInt(blobContentLength, 10)} - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -868,33 +865,33 @@ func (client *PageBlobClient) updateSequenceNumberCreateRequest(ctx context.Cont reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-sequence-number-action"] = []string{string(sequenceNumberAction)} if options != nil && options.BlobSequenceNumber != nil { req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-sequence-number-action"] = []string{string(sequenceNumberAction)} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -980,19 +977,31 @@ func (client *PageBlobClient) uploadPagesCreateRequest(ctx context.Context, cont reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = 
reqQP.Encode() - req.Raw().Header["x-ms-page-write"] = []string{"update"} + req.Raw().Header["Accept"] = []string{"application/xml"} req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} if options != nil && options.TransactionalContentMD5 != nil { req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } if options != nil && options.TransactionalContentCRC64 != nil { req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} } - if options != nil && options.Range != nil { - req.Raw().Header["x-ms-range"] = []string{*options.Range} - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} @@ -1000,41 +1009,29 @@ func (client *PageBlobClient) uploadPagesCreateRequest(ctx context.Context, cont if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} + } if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { req.Raw().Header["x-ms-if-sequence-number-le"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)} } if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { req.Raw().Header["x-ms-if-sequence-number-lt"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)} } - if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { - req.Raw().Header["x-ms-if-sequence-number-eq"] = 
[]string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-page-write"] = []string{"update"} + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if err := req.SetBody(body, "application/octet-stream"); err != nil { return nil, err } @@ -1158,31 +1155,41 @@ func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Contex reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-page-write"] = []string{"update"} + req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } req.Raw().Header["x-ms-copy-source"] = []string{sourceURL} - req.Raw().Header["x-ms-source-range"] = []string{sourceRange} - if options != nil && options.SourceContentMD5 != nil { - req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = 
[]string{*options.CopySourceAuthorization} } - if options != nil && options.SourceContentcrc64 != nil { - req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } - req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} - req.Raw().Header["x-ms-range"] = []string{rangeParam} if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} } if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} } if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { req.Raw().Header["x-ms-if-sequence-number-le"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)} @@ -1190,44 +1197,34 @@ func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Contex if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { req.Raw().Header["x-ms-if-sequence-number-lt"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)} } - if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { - req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = 
[]string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + req.Raw().Header["x-ms-page-write"] = []string{"update"} + req.Raw().Header["x-ms-range"] = []string{rangeParam} + if options != nil && options.SourceContentcrc64 != nil { + req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} + } + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if options != nil && options.CopySourceAuthorization != nil { - req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-source-range"] = []string{sourceRange} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_responses.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_responses.go index 738d23c8f19..adb736607fa 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_responses.go @@ -1,6 +1,3 @@ -//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. 
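Note on the regenerated page blob request builders above: the hunks mostly reorder the header and query assignments (the generator now emits them in rough alphabetical order). Because req.Raw().Header is a plain http.Header map, assignment order has no effect on the request that goes on the wire. A minimal standalone sketch of that point, using made-up header values rather than the SDK types:

package main

import (
	"fmt"
	"net/http"
	"reflect"
)

func main() {
	// Assign the same headers in two different orders, mirroring the
	// before/after ordering seen in the regenerated createRequest functions.
	a := http.Header{}
	a["x-ms-lease-id"] = []string{"lease-1"}
	a["If-Match"] = []string{"etag-1"}
	a["x-ms-version"] = []string{"2023-11-03"} // hypothetical service version

	b := http.Header{}
	b["If-Match"] = []string{"etag-1"}
	b["x-ms-version"] = []string{"2023-11-03"}
	b["x-ms-lease-id"] = []string{"lease-1"}

	// The resulting header maps are identical regardless of assignment order.
	fmt.Println(reflect.DeepEqual(a, b)) // true
}
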
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go index c792fbf094b..ae585294942 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go @@ -1,6 +1,3 @@ -//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. @@ -62,25 +59,25 @@ func (client *ServiceClient) filterBlobsCreateRequest(ctx context.Context, where } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "blobs") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) } - reqQP.Set("where", where) if options != nil && options.Marker != nil { reqQP.Set("marker", *options.Marker) } if options != nil && options.Maxresults != nil { reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) } - if options != nil && options.Include != nil { - reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } + reqQP.Set("where", where) req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -139,11 +136,11 @@ func (client *ServiceClient) getAccountInfoCreateRequest(ctx context.Context, op return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "account") reqQP.Set("comp", "properties") + reqQP.Set("restype", "account") req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -213,17 +210,17 @@ func (client *ServiceClient) getPropertiesCreateRequest(ctx context.Context, opt return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "service") reqQP.Set("comp", "properties") + reqQP.Set("restype", "service") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -276,17 +273,17 @@ func (client *ServiceClient) getStatisticsCreateRequest(ctx 
context.Context, opt return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "service") reqQP.Set("comp", "stats") + reqQP.Set("restype", "service") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -348,17 +345,17 @@ func (client *ServiceClient) getUserDelegationKeyCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "service") reqQP.Set("comp", "userdelegationkey") + reqQP.Set("restype", "service") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if err := runtime.MarshalAsXML(req, keyInfo); err != nil { return nil, err } @@ -405,8 +402,8 @@ func (client *ServiceClient) ListContainersSegmentCreateRequest(ctx context.Cont } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "list") - if options != nil && options.Prefix != nil { - reqQP.Set("prefix", *options.Prefix) + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) } if options != nil && options.Marker != nil { reqQP.Set("marker", *options.Marker) @@ -414,18 +411,18 @@ func (client *ServiceClient) ListContainersSegmentCreateRequest(ctx context.Cont if options != nil && options.Maxresults != nil { reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) } - if options != nil && options.Include != nil { - reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + if options != nil && options.Prefix != nil { + reqQP.Set("prefix", *options.Prefix) } if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -479,17 +476,17 @@ func (client *ServiceClient) setPropertiesCreateRequest(ctx context.Context, sto return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "service") reqQP.Set("comp", "properties") + reqQP.Set("restype", "service") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + 
req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if err := runtime.MarshalAsXML(req, storageServiceProperties); err != nil { return nil, err } @@ -551,13 +548,13 @@ func (client *ServiceClient) submitBatchCreateRequest(ctx context.Context, conte } req.Raw().URL.RawQuery = reqQP.Encode() runtime.SkipBodyDownload(req) + req.Raw().Header["Accept"] = []string{"application/xml"} req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} req.Raw().Header["Content-Type"] = []string{multipartContentType} - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if err := req.SetBody(body, multipartContentType); err != nil { return nil, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go index 58665032972..ee3732ebcc1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go @@ -1,6 +1,3 @@ -//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. @@ -36,7 +33,14 @@ func (t *dateTimeRFC1123) UnmarshalJSON(data []byte) error { } func (t *dateTimeRFC1123) UnmarshalText(data []byte) error { + if len(data) == 0 { + return nil + } p, err := time.Parse(time.RFC1123, string(data)) *t = dateTimeRFC1123(p) return err } + +func (t dateTimeRFC1123) String() string { + return time.Time(t).Format(time.RFC1123) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go index 82b370133fa..e9eac9bcb11 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go @@ -1,6 +1,3 @@ -//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. @@ -15,12 +12,16 @@ import ( ) // Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. 
-var tzOffsetRegex = regexp.MustCompile(`(Z|z|\+|-)(\d+:\d+)*"*$`) +var tzOffsetRegex = regexp.MustCompile(`(?:Z|z|\+|-)(?:\d+:\d+)*"*$`) const ( - utcDateTimeJSON = `"2006-01-02T15:04:05.999999999"` - utcDateTime = "2006-01-02T15:04:05.999999999" - dateTimeJSON = `"` + time.RFC3339Nano + `"` + utcDateTime = "2006-01-02T15:04:05.999999999" + utcDateTimeJSON = `"` + utcDateTime + `"` + utcDateTimeNoT = "2006-01-02 15:04:05.999999999" + utcDateTimeJSONNoT = `"` + utcDateTimeNoT + `"` + dateTimeNoT = `2006-01-02 15:04:05.999999999Z07:00` + dateTimeJSON = `"` + time.RFC3339Nano + `"` + dateTimeJSONNoT = `"` + dateTimeNoT + `"` ) type dateTimeRFC3339 time.Time @@ -36,17 +37,36 @@ func (t dateTimeRFC3339) MarshalText() ([]byte, error) { } func (t *dateTimeRFC3339) UnmarshalJSON(data []byte) error { - layout := utcDateTimeJSON - if tzOffsetRegex.Match(data) { + tzOffset := tzOffsetRegex.Match(data) + hasT := strings.Contains(string(data), "T") || strings.Contains(string(data), "t") + var layout string + if tzOffset && hasT { layout = dateTimeJSON + } else if tzOffset { + layout = dateTimeJSONNoT + } else if hasT { + layout = utcDateTimeJSON + } else { + layout = utcDateTimeJSONNoT } return t.Parse(layout, string(data)) } func (t *dateTimeRFC3339) UnmarshalText(data []byte) error { - layout := utcDateTime - if tzOffsetRegex.Match(data) { + if len(data) == 0 { + return nil + } + tzOffset := tzOffsetRegex.Match(data) + hasT := strings.Contains(string(data), "T") || strings.Contains(string(data), "t") + var layout string + if tzOffset && hasT { layout = time.RFC3339Nano + } else if tzOffset { + layout = dateTimeNoT + } else if hasT { + layout = utcDateTime + } else { + layout = utcDateTimeNoT } return t.Parse(layout, string(data)) } @@ -56,3 +76,7 @@ func (t *dateTimeRFC3339) Parse(layout, value string) error { *t = dateTimeRFC3339(p) return err } + +func (t dateTimeRFC3339) String() string { + return time.Time(t).Format(time.RFC3339Nano) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go index 1bd0e4de05a..355d0176b3d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go @@ -1,6 +1,3 @@ -//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go index 9f95ad8ae43..5c44af34ae9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go @@ -44,7 +44,6 @@ func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error { // Create the goroutines that process each operation (in parallel). 
for g := uint16(0); g < o.Concurrency; g++ { - //grIndex := g go func() { for f := range operationChannel { err := f() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go index 30d0253af91..63ceac9792e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go @@ -380,8 +380,8 @@ func (pb *Client) GetAccountInfo(ctx context.Context, o *blob.GetAccountInfoOpti // SetHTTPHeaders changes a blob's HTTP headers. // For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. -func (pb *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { - return pb.BlobClient().SetHTTPHeaders(ctx, HTTPHeaders, o) +func (pb *Client) SetHTTPHeaders(ctx context.Context, httpHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { + return pb.BlobClient().SetHTTPHeaders(ctx, httpHeaders, o) } // SetMetadata changes a blob's metadata. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go index 45f730847d2..813fa77a9e6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go @@ -255,7 +255,7 @@ func (v BlobSignatureValues) SignWithUserDelegation(userDelegationCredential *Us signature: signature, } - //User delegation SAS specific parameters + // User delegation SAS specific parameters p.signedOID = *udk.SignedOID p.signedTID = *udk.SignedTID p.signedStart = *udk.SignedStart @@ -272,7 +272,7 @@ func getCanonicalName(account string, containerName string, blobName string, dir // Blob: "/blob/account/containername/blobname" elements := []string{"/blob/", account, "/", containerName} if blobName != "" { - elements = append(elements, "/", strings.Replace(blobName, "\\", "/", -1)) + elements = append(elements, "/", strings.ReplaceAll(blobName, "\\", "/")) } else if directoryName != "" { elements = append(elements, "/", directoryName) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go index 57fe053f07a..758739cb826 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go @@ -117,7 +117,7 @@ func (up URLParts) String() string { rawQuery := up.UnparsedParams - //If no snapshot is initially provided, fill it in from the SAS query properties to help the user + // If no snapshot is initially provided, fill it in from the SAS query properties to help the user if up.Snapshot == "" && !up.SAS.SnapshotTime().IsZero() { up.Snapshot = up.SAS.SnapshotTime().Format(exported.SnapshotTimeFormat) } diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go index e97589dcdcb..25697b3c854 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go @@ -21,7 +21,7 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "io" 
"log" "os" "strings" @@ -325,7 +325,7 @@ func GetSettingsFromFile() (FileSettings, error) { return s, errors.New("environment variable AZURE_AUTH_LOCATION is not set") } - contents, err := ioutil.ReadFile(fileLocation) + contents, err := os.ReadFile(fileLocation) if err != nil { return s, err } @@ -488,7 +488,7 @@ func decode(b []byte) ([]byte, error) { } return []byte(string(utf16.Decode(u16))), nil } - return ioutil.ReadAll(reader) + return io.ReadAll(reader) } func (settings FileSettings) getResourceForToken(baseURI string) (string, error) { @@ -636,7 +636,7 @@ func (ccc ClientCertificateConfig) ServicePrincipalToken() (*adal.ServicePrincip if err != nil { return nil, err } - certData, err := ioutil.ReadFile(ccc.CertificatePath) + certData, err := os.ReadFile(ccc.CertificatePath) if err != nil { return nil, fmt.Errorf("failed to read the certificate file (%s): %v", ccc.CertificatePath, err) } @@ -653,7 +653,7 @@ func (ccc ClientCertificateConfig) MultiTenantServicePrincipalToken() (*adal.Mul if err != nil { return nil, err } - certData, err := ioutil.ReadFile(ccc.CertificatePath) + certData, err := os.ReadFile(ccc.CertificatePath) if err != nil { return nil, fmt.Errorf("failed to read the certificate file (%s): %v", ccc.CertificatePath, err) } diff --git a/vendor/github.com/IBM/sarama/Dockerfile.kafka b/vendor/github.com/IBM/sarama/Dockerfile.kafka index 186c2eb1863..ac2d47a1644 100644 --- a/vendor/github.com/IBM/sarama/Dockerfile.kafka +++ b/vendor/github.com/IBM/sarama/Dockerfile.kafka @@ -1,4 +1,4 @@ -FROM registry.access.redhat.com/ubi8/ubi-minimal:8.8@sha256:b93deceb59a58588d5b16429fc47f98920f84740a1f2ed6454e33275f0701b59 +FROM registry.access.redhat.com/ubi8/ubi-minimal:8.9@sha256:f30dbf77b075215f6c827c269c073b5e0973e5cea8dacdf7ecb6a19c868f37f2 USER root diff --git a/vendor/github.com/IBM/sarama/docker-compose.yml b/vendor/github.com/IBM/sarama/docker-compose.yml index e916416d50e..55283cfe4f0 100644 --- a/vendor/github.com/IBM/sarama/docker-compose.yml +++ b/vendor/github.com/IBM/sarama/docker-compose.yml @@ -3,6 +3,7 @@ services: zookeeper-1: hostname: 'zookeeper-1' image: 'docker.io/library/zookeeper:3.6.3' + init: true restart: always environment: ZOO_MY_ID: '1' @@ -15,6 +16,7 @@ services: zookeeper-2: hostname: 'zookeeper-2' image: 'docker.io/library/zookeeper:3.6.3' + init: true restart: always environment: ZOO_MY_ID: '2' @@ -27,6 +29,7 @@ services: zookeeper-3: hostname: 'zookeeper-3' image: 'docker.io/library/zookeeper:3.6.3' + init: true restart: always environment: ZOO_MY_ID: '3' @@ -39,6 +42,7 @@ services: kafka-1: hostname: 'kafka-1' image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' + init: true build: context: . dockerfile: Dockerfile.kafka @@ -84,6 +88,7 @@ services: kafka-2: hostname: 'kafka-2' image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' + init: true build: context: . dockerfile: Dockerfile.kafka @@ -129,6 +134,7 @@ services: kafka-3: hostname: 'kafka-3' image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' + init: true build: context: . dockerfile: Dockerfile.kafka @@ -174,6 +180,7 @@ services: kafka-4: hostname: 'kafka-4' image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' + init: true build: context: . dockerfile: Dockerfile.kafka @@ -219,6 +226,7 @@ services: kafka-5: hostname: 'kafka-5' image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' + init: true build: context: . 
dockerfile: Dockerfile.kafka @@ -264,6 +272,7 @@ services: toxiproxy: hostname: 'toxiproxy' image: 'ghcr.io/shopify/toxiproxy:2.4.0' + init: true healthcheck: test: ['CMD', '/toxiproxy-cli', 'l'] interval: 15s diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/accountid_endpoint_mode.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/accountid_endpoint_mode.go new file mode 100644 index 00000000000..6504a21864c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/accountid_endpoint_mode.go @@ -0,0 +1,18 @@ +package aws + +// AccountIDEndpointMode controls how a resolved AWS account ID is handled for endpoint routing. +type AccountIDEndpointMode string + +const ( + // AccountIDEndpointModeUnset indicates the AWS account ID will not be used for endpoint routing + AccountIDEndpointModeUnset AccountIDEndpointMode = "" + + // AccountIDEndpointModePreferred indicates the AWS account ID will be used for endpoint routing if present + AccountIDEndpointModePreferred = "preferred" + + // AccountIDEndpointModeRequired indicates an error will be returned if the AWS account ID is not resolved from identity + AccountIDEndpointModeRequired = "required" + + // AccountIDEndpointModeDisabled indicates the AWS account ID will be ignored during endpoint routing + AccountIDEndpointModeDisabled = "disabled" +) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go index 2264200c169..16000d79279 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go @@ -162,6 +162,9 @@ type Config struct { // This variable is sourced from environment variable AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES or // the shared config profile attribute request_min_compression_size_bytes RequestMinCompressSizeBytes int64 + + // Controls how a resolved AWS account ID is handled for endpoint routing. + AccountIDEndpointMode AccountIDEndpointMode } // NewConfig returns a new Config pointer that can be chained with builder diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go index 714d4ad85cb..98ba7705642 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go @@ -90,6 +90,9 @@ type Credentials struct { // The time the credentials will expire at. Should be ignored if CanExpire // is false. Expires time.Time + + // The ID of the account for the credentials. + AccountID string } // Expired returns if the credentials have expired. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go index aa10a9b40f0..99edbf3ee63 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go @@ -70,6 +70,10 @@ func GetUseFIPSEndpoint(options ...interface{}) (value FIPSEndpointState, found // The SDK will automatically resolve these endpoints per API client using an // internal endpoint resolvers. If you'd like to provide custom endpoint // resolving behavior you can implement the EndpointResolver interface. +// +// Deprecated: This structure was used with the global [EndpointResolver] +// interface, which has been deprecated in favor of service-specific endpoint +// resolution. See the deprecation docs on that interface for more information. type Endpoint struct { // The base URL endpoint the SDK API clients will use to make API calls to. 
// The SDK will suffix URI path and query elements to this endpoint. @@ -124,6 +128,8 @@ type Endpoint struct { } // EndpointSource is the endpoint source type. +// +// Deprecated: The global [Endpoint] structure is deprecated. type EndpointSource int const ( @@ -161,19 +167,25 @@ func (e *EndpointNotFoundError) Unwrap() error { // API clients will fallback to attempting to resolve the endpoint using its // internal default endpoint resolver. // -// Deprecated: See EndpointResolverWithOptions +// Deprecated: The global endpoint resolution interface is deprecated. The API +// for endpoint resolution is now unique to each service and is set via the +// EndpointResolverV2 field on service client options. Setting a value for +// EndpointResolver on aws.Config or service client options will prevent you +// from using any endpoint-related service features released after the +// introduction of EndpointResolverV2. You may also encounter broken or +// unexpected behavior when using the old global interface with services that +// use many endpoint-related customizations such as S3. type EndpointResolver interface { ResolveEndpoint(service, region string) (Endpoint, error) } // EndpointResolverFunc wraps a function to satisfy the EndpointResolver interface. // -// Deprecated: See EndpointResolverWithOptionsFunc +// Deprecated: The global endpoint resolution interface is deprecated. See +// deprecation docs on [EndpointResolver]. type EndpointResolverFunc func(service, region string) (Endpoint, error) // ResolveEndpoint calls the wrapped function and returns the results. -// -// Deprecated: See EndpointResolverWithOptions.ResolveEndpoint func (e EndpointResolverFunc) ResolveEndpoint(service, region string) (Endpoint, error) { return e(service, region) } @@ -184,11 +196,17 @@ func (e EndpointResolverFunc) ResolveEndpoint(service, region string) (Endpoint, // available. If the EndpointResolverWithOptions returns an EndpointNotFoundError error, // API clients will fallback to attempting to resolve the endpoint using its // internal default endpoint resolver. +// +// Deprecated: The global endpoint resolution interface is deprecated. See +// deprecation docs on [EndpointResolver]. type EndpointResolverWithOptions interface { ResolveEndpoint(service, region string, options ...interface{}) (Endpoint, error) } // EndpointResolverWithOptionsFunc wraps a function to satisfy the EndpointResolverWithOptions interface. +// +// Deprecated: The global endpoint resolution interface is deprecated. See +// deprecation docs on [EndpointResolver]. type EndpointResolverWithOptionsFunc func(service, region string, options ...interface{}) (Endpoint, error) // ResolveEndpoint calls the wrapped function and returns the results. 
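The deprecation notes added above steer callers away from the global resolver toward service-specific endpoint configuration (EndpointResolverV2 and BaseEndpoint on service client options). As a hedged illustration only, since the S3 client package is not part of this diff and the option names are taken from the deprecation text, a custom endpoint would now typically be set per client rather than on aws.Config:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}

	// Instead of the deprecated aws.EndpointResolver / EndpointResolverWithOptions,
	// point a single service client at a custom endpoint via its own options.
	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.BaseEndpoint = aws.String("https://s3.example.internal") // hypothetical endpoint
	})
	_ = client
}
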
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go index 639ba763097..ba898a1a85f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go @@ -3,4 +3,4 @@ package aws // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.26.1" +const goModuleVersion = "1.30.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/metrics.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/metrics.go index b0133f4c88d..19d6107c461 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/metrics.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/metrics.go @@ -112,6 +112,8 @@ type MetricData struct { ResolveEndpointStartTime time.Time ResolveEndpointEndTime time.Time EndpointResolutionDuration time.Duration + GetIdentityStartTime time.Time + GetIdentityEndTime time.Time InThroughput float64 OutThroughput float64 RetryCount int @@ -122,6 +124,7 @@ type MetricData struct { OperationName string PartitionID string Region string + UserAgent string RequestContentLength int64 Stream StreamMetrics Attempts []AttemptMetrics @@ -144,8 +147,6 @@ type AttemptMetrics struct { ConnRequestedTime time.Time ConnObtainedTime time.Time ConcurrencyAcquireDuration time.Duration - CredentialFetchStartTime time.Time - CredentialFetchEndTime time.Time SignStartTime time.Time SignEndTime time.Time SigningDuration time.Duration diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go index db7cda42d92..ff0bc921f1b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go @@ -5,6 +5,7 @@ import ( "fmt" "os" "runtime" + "sort" "strings" "github.com/aws/aws-sdk-go-v2/aws" @@ -30,6 +31,7 @@ const ( FrameworkMetadata AdditionalMetadata ApplicationIdentifier + FeatureMetadata2 ) func (k SDKAgentKeyType) string() string { @@ -50,6 +52,8 @@ func (k SDKAgentKeyType) string() string { return "lib" case ApplicationIdentifier: return "app" + case FeatureMetadata2: + return "m" case AdditionalMetadata: fallthrough default: @@ -64,9 +68,29 @@ var validChars = map[rune]bool{ '-': true, '.': true, '^': true, '_': true, '`': true, '|': true, '~': true, } +// UserAgentFeature enumerates tracked SDK features. +type UserAgentFeature string + +// Enumerates UserAgentFeature. +const ( + UserAgentFeatureResourceModel UserAgentFeature = "A" // n/a (we don't generate separate resource types) + UserAgentFeatureWaiter = "B" + UserAgentFeaturePaginator = "C" + UserAgentFeatureRetryModeLegacy = "D" // n/a (equivalent to standard) + UserAgentFeatureRetryModeStandard = "E" + UserAgentFeatureRetryModeAdaptive = "F" + UserAgentFeatureS3Transfer = "G" + UserAgentFeatureS3CryptoV1N = "H" // n/a (crypto client is external) + UserAgentFeatureS3CryptoV2 = "I" // n/a + UserAgentFeatureS3ExpressBucket = "J" + UserAgentFeatureS3AccessGrants = "K" // not yet implemented + UserAgentFeatureGZIPRequestCompression = "L" +) + // RequestUserAgent is a build middleware that set the User-Agent for the request. 
type RequestUserAgent struct { sdkAgent, userAgent *smithyhttp.UserAgentBuilder + features map[UserAgentFeature]struct{} } // NewRequestUserAgent returns a new requestUserAgent which will set the User-Agent and X-Amz-User-Agent for the @@ -87,6 +111,7 @@ func NewRequestUserAgent() *RequestUserAgent { r := &RequestUserAgent{ sdkAgent: sdkAgent, userAgent: userAgent, + features: map[UserAgentFeature]struct{}{}, } addSDKMetadata(r) @@ -191,6 +216,12 @@ func (u *RequestUserAgent) AddUserAgentKeyValue(key, value string) { u.userAgent.AddKeyValue(strings.Map(rules, key), strings.Map(rules, value)) } +// AddUserAgentFeature adds the feature ID to the tracking list to be emitted +// in the final User-Agent string. +func (u *RequestUserAgent) AddUserAgentFeature(feature UserAgentFeature) { + u.features[feature] = struct{}{} +} + // AddSDKAgentKey adds the component identified by name to the User-Agent string. func (u *RequestUserAgent) AddSDKAgentKey(keyType SDKAgentKeyType, key string) { // TODO: should target sdkAgent @@ -227,6 +258,9 @@ func (u *RequestUserAgent) HandleBuild(ctx context.Context, in middleware.BuildI func (u *RequestUserAgent) addHTTPUserAgent(request *smithyhttp.Request) { const userAgent = "User-Agent" updateHTTPHeader(request, userAgent, u.userAgent.Build()) + if len(u.features) > 0 { + updateHTTPHeader(request, userAgent, buildFeatureMetrics(u.features)) + } } func (u *RequestUserAgent) addHTTPSDKAgent(request *smithyhttp.Request) { @@ -259,3 +293,13 @@ func rules(r rune) rune { return '-' } } + +func buildFeatureMetrics(features map[UserAgentFeature]struct{}) string { + fs := make([]string, 0, len(features)) + for f := range features { + fs = append(fs, string(f)) + } + + sort.Strings(fs) + return fmt.Sprintf("%s/%s", FeatureMetadata2.string(), strings.Join(fs, ",")) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md index 186c488924b..7df617668f8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.6.3 (2024-06-28) + +* No change notes available for this release. + # v1.6.2 (2024-03-29) * No change notes available for this release. 
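The new user-agent feature tracking above collects single-character feature IDs and, when any are present, appends an extra "m/<sorted codes>" token to the User-Agent header. A small self-contained sketch of the same rendering logic, re-implemented here because buildFeatureMetrics is unexported:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// renderFeatureMetrics mirrors buildFeatureMetrics from the hunk above:
// sort the tracked feature codes and join them under the "m" key type.
func renderFeatureMetrics(features map[string]struct{}) string {
	fs := make([]string, 0, len(features))
	for f := range features {
		fs = append(fs, f)
	}
	sort.Strings(fs)
	return "m/" + strings.Join(fs, ",")
}

func main() {
	// Example codes from the diff: "E" = standard retry mode, "J" = S3 Express bucket.
	tracked := map[string]struct{}{"J": {}, "E": {}}
	// This token is appended to the existing User-Agent value, e.g. "... m/E,J".
	fmt.Println(renderFeatureMetrics(tracked))
}
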
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go index a0479b9be2a..382f886a92d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go @@ -3,4 +3,4 @@ package eventstream // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.6.2" +const goModuleVersion = "1.6.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go index dc703d482d2..b645fbdf132 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go @@ -2,12 +2,15 @@ package retry import ( "context" + "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" "strconv" "strings" "time" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" + internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" + "github.com/aws/aws-sdk-go-v2/aws" awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/internal/sdk" @@ -39,6 +42,10 @@ type Attempt struct { requestCloner RequestCloner } +// define the threshold at which we will consider certain kind of errors to be probably +// caused by clock skew +const skewThreshold = 4 * time.Minute + // NewAttemptMiddleware returns a new Attempt retry middleware. func NewAttemptMiddleware(retryer aws.Retryer, requestCloner RequestCloner, optFns ...func(*Attempt)) *Attempt { m := &Attempt{ @@ -86,6 +93,9 @@ func (r *Attempt) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeIn AttemptClockSkew: attemptClockSkew, }) + // Setting clock skew to be used on other context (like signing) + ctx = internalcontext.SetAttemptSkewContext(ctx, attemptClockSkew) + var attemptResult AttemptResult out, attemptResult, releaseRetryToken, err = r.handleAttempt(attemptCtx, attemptInput, releaseRetryToken, next) attemptClockSkew, _ = awsmiddle.GetAttemptSkew(attemptResult.ResponseMetadata) @@ -185,6 +195,8 @@ func (r *Attempt) handleAttempt( return out, attemptResult, nopRelease, err } + err = wrapAsClockSkew(ctx, err) + //------------------------------ // Is Retryable and Should Retry //------------------------------ @@ -247,6 +259,37 @@ func (r *Attempt) handleAttempt( return out, attemptResult, releaseRetryToken, err } +// errors that, if detected when we know there's a clock skew, +// can be retried and have a high chance of success +var possibleSkewCodes = map[string]struct{}{ + "InvalidSignatureException": {}, + "SignatureDoesNotMatch": {}, + "AuthFailure": {}, +} + +var definiteSkewCodes = map[string]struct{}{ + "RequestExpired": {}, + "RequestInTheFuture": {}, + "RequestTimeTooSkewed": {}, +} + +// wrapAsClockSkew checks if this error could be related to a clock skew +// error and if so, wrap the error. 
+func wrapAsClockSkew(ctx context.Context, err error) error { + var v interface{ ErrorCode() string } + if !errors.As(err, &v) { + return err + } + if _, ok := definiteSkewCodes[v.ErrorCode()]; ok { + return &retryableClockSkewError{Err: err} + } + _, isPossibleSkewCode := possibleSkewCodes[v.ErrorCode()] + if skew := internalcontext.GetAttemptSkewContext(ctx); skew > skewThreshold && isPossibleSkewCode { + return &retryableClockSkewError{Err: err} + } + return err +} + // MetricsHeader attaches SDK request metric header for retries to the transport type MetricsHeader struct{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go index 987affdde6f..acd8d1cc3d6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go @@ -2,6 +2,7 @@ package retry import ( "errors" + "fmt" "net" "net/url" "strings" @@ -199,3 +200,23 @@ func (r RetryableErrorCode) IsErrorRetryable(err error) aws.Ternary { return aws.TrueTernary } + +// retryableClockSkewError marks errors that can be caused by clock skew +// (difference between server time and client time). +// This is returned when there's certain confidence that adjusting the client time +// could allow a retry to succeed +type retryableClockSkewError struct{ Err error } + +func (e *retryableClockSkewError) Error() string { + return fmt.Sprintf("Probable clock skew error: %v", e.Err) +} + +// Unwrap returns the wrapped error. +func (e *retryableClockSkewError) Unwrap() error { + return e.Err +} + +// RetryableError allows the retryer to retry this request +func (e *retryableClockSkewError) RetryableError() bool { + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go index ca738f234b3..71b1a352171 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go @@ -38,7 +38,6 @@ var RequiredSignedHeaders = Rules{ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Expected-Bucket-Owner": struct{}{}, "X-Amz-Grant-Full-control": struct{}{}, "X-Amz-Grant-Read": struct{}{}, "X-Amz-Grant-Read-Acp": struct{}{}, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go index febeb0482db..a9db6433de9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go @@ -11,7 +11,6 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4" internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/internal/sdk" @@ -301,22 +300,7 @@ func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middl return out, metadata, &SigningError{Err: fmt.Errorf("computed payload hash missing from context")} } - mctx := metrics.Context(ctx) - - if mctx != nil { - if attempt, err := mctx.Data().LatestAttempt(); err == nil { - 
attempt.CredentialFetchStartTime = sdk.NowTime() - } - } - credentials, err := s.credentialsProvider.Retrieve(ctx) - - if mctx != nil { - if attempt, err := mctx.Data().LatestAttempt(); err == nil { - attempt.CredentialFetchEndTime = sdk.NowTime() - } - } - if err != nil { return out, metadata, &SigningError{Err: fmt.Errorf("failed to retrieve credentials: %w", err)} } @@ -337,20 +321,7 @@ func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middl }) } - if mctx != nil { - if attempt, err := mctx.Data().LatestAttempt(); err == nil { - attempt.SignStartTime = sdk.NowTime() - } - } - err = s.signer.SignHTTP(ctx, credentials, req.Request, payloadHash, signingName, signingRegion, sdk.NowTime(), signerOptions...) - - if mctx != nil { - if attempt, err := mctx.Data().LatestAttempt(); err == nil { - attempt.SignEndTime = sdk.NowTime() - } - } - if err != nil { return out, metadata, &SigningError{Err: fmt.Errorf("failed to sign http request, %w", err)} } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go index bb61904e1d8..dcd896a9bf6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go @@ -1,48 +1,41 @@ -// Package v4 implements signing for AWS V4 signer +// Package v4 implements the AWS signature version 4 algorithm (commonly known +// as SigV4). // -// Provides request signing for request that need to be signed with -// AWS V4 Signatures. +// For more information about SigV4, see [Signing AWS API requests] in the IAM +// user guide. // -// # Standalone Signer +// While this implementation CAN work in an external context, it is developed +// primarily for SDK use and you may encounter fringe behaviors around header +// canonicalization. // -// Generally using the signer outside of the SDK should not require any additional +// # Pre-escaping a request URI // -// The signer does this by taking advantage of the URL.EscapedPath method. If your request URI requires +// AWS v4 signature validation requires that the canonical string's URI path +// component must be the escaped form of the HTTP request's path. +// +// The Go HTTP client will perform escaping automatically on the HTTP request. +// This may cause signature validation errors because the request differs from +// the URI path or query from which the signature was generated. // -// additional escaping you many need to use the URL.Opaque to define what the raw URI should be sent -// to the service as. +// Because of this, we recommend that you explicitly escape the request when +// using this signer outside of the SDK to prevent possible signature mismatch. +// This can be done by setting URL.Opaque on the request. The signer will +// prefer that value, falling back to the return of URL.EscapedPath if unset. // -// The signer will first check the URL.Opaque field, and use its value if set. -// The signer does require the URL.Opaque field to be set in the form of: +// When setting URL.Opaque you must do so in the form of: // // "///" // // // e.g. // "//example.com/some/path" // -// The leading "//" and hostname are required or the URL.Opaque escaping will -// not work correctly. -// -// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath() -// method and using the returned value. -// -// AWS v4 signature validation requires that the canonical string's URI path -// element must be the URI escaped form of the HTTP request's path. 
-// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html -// -// The Go HTTP client will perform escaping automatically on the request. Some -// of these escaping may cause signature validation errors because the HTTP -// request differs from the URI path or query that the signature was generated. -// https://golang.org/pkg/net/url/#URL.EscapedPath +// The leading "//" and hostname are required or the escaping will not work +// correctly. // -// Because of this, it is recommended that when using the signer outside of the -// SDK that explicitly escaping the request prior to being signed is preferable, -// and will help prevent signature validation errors. This can be done by setting -// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then -// call URL.EscapedPath() if Opaque is not set. +// The TestStandaloneSign unit test provides a complete example of using the +// signer outside of the SDK and pre-escaping the URI path. // -// Test `TestStandaloneSign` provides a complete example of using the signer -// outside of the SDK and pre-escaping the URI path. +// [Signing AWS API requests]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-signing.html package v4 import ( @@ -402,6 +395,12 @@ func buildQuery(r v4Internal.Rule, header http.Header) (url.Values, http.Header) query := url.Values{} unsignedHeaders := http.Header{} for k, h := range header { + // literally just this header has this constraint for some stupid reason, + // see #2508 + if k == "X-Amz-Expected-Bucket-Owner" { + k = "x-amz-expected-bucket-owner" + } + if r.IsValid(k) { query[k] = h } else { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md index 87ea591aef4..6977bdb781f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md @@ -1,3 +1,68 @@ +# v1.27.27 (2024-07-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.26 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.25 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.24 (2024-07-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.23 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.22 (2024-06-26) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.21 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.20 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.19 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.18 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.17 (2024-06-03) + +* **Documentation**: Add deprecation docs to global endpoint resolution interfaces. These APIs were previously deprecated with the introduction of service-specific endpoint resolution (EndpointResolverV2 and BaseEndpoint on service client options). 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.16 (2024-05-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.15 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.14 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.13 (2024-05-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.12 (2024-05-08) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.27.11 (2024-04-05) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go index 50582d89d54..d5226cb0437 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go @@ -80,6 +80,9 @@ var defaultAWSConfigResolvers = []awsConfigResolver{ // Sets the RequestMinCompressSizeBytes if present in env var or shared config profile resolveRequestMinCompressSizeBytes, + + // Sets the AccountIDEndpointMode if present in env var or shared config profile + resolveAccountIDEndpointMode, } // A Config represents a generic configuration value or set of values. This type diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go index 88550198cce..3a06f1412a7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go @@ -80,6 +80,9 @@ const ( awsRequestMinCompressionSizeBytes = "AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES" awsS3DisableExpressSessionAuthEnv = "AWS_S3_DISABLE_EXPRESS_SESSION_AUTH" + + awsAccountIDEnv = "AWS_ACCOUNT_ID" + awsAccountIDEndpointModeEnv = "AWS_ACCOUNT_ID_ENDPOINT_MODE" ) var ( @@ -290,6 +293,9 @@ type EnvConfig struct { // will only bypass the modified endpoint routing and signing behaviors // associated with the feature. S3DisableExpressAuth *bool + + // Indicates whether account ID will be required/ignored in endpoint2.0 routing + AccountIDEndpointMode aws.AccountIDEndpointMode } // loadEnvConfig reads configuration values from the OS's environment variables. @@ -309,6 +315,7 @@ func NewEnvConfig() (EnvConfig, error) { setStringFromEnvVal(&creds.AccessKeyID, credAccessEnvKeys) setStringFromEnvVal(&creds.SecretAccessKey, credSecretEnvKeys) if creds.HasKeys() { + creds.AccountID = os.Getenv(awsAccountIDEnv) creds.SessionToken = os.Getenv(awsSessionTokenEnvVar) cfg.Credentials = creds } @@ -389,6 +396,10 @@ func NewEnvConfig() (EnvConfig, error) { return cfg, err } + if err := setAIDEndPointModeFromEnvVal(&cfg.AccountIDEndpointMode, []string{awsAccountIDEndpointModeEnv}); err != nil { + return cfg, err + } + return cfg, nil } @@ -417,6 +428,10 @@ func (c EnvConfig) getRequestMinCompressSizeBytes(context.Context) (int64, bool, return *c.RequestMinCompressSizeBytes, true, nil } +func (c EnvConfig) getAccountIDEndpointMode(context.Context) (aws.AccountIDEndpointMode, bool, error) { + return c.AccountIDEndpointMode, len(c.AccountIDEndpointMode) > 0, nil +} + // GetRetryMaxAttempts returns the value of AWS_MAX_ATTEMPTS if was specified, // and not 0. 
func (c EnvConfig) GetRetryMaxAttempts(ctx context.Context) (int, bool, error) { @@ -491,6 +506,28 @@ func setEC2IMDSEndpointMode(mode *imds.EndpointModeState, keys []string) error { return nil } +func setAIDEndPointModeFromEnvVal(m *aws.AccountIDEndpointMode, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue + } + + switch value { + case "preferred": + *m = aws.AccountIDEndpointModePreferred + case "required": + *m = aws.AccountIDEndpointModeRequired + case "disabled": + *m = aws.AccountIDEndpointModeDisabled + default: + return fmt.Errorf("invalid value for environment variable, %s=%s, must be preferred/required/disabled", k, value) + } + break + } + return nil +} + // GetRegion returns the AWS Region if set in the environment. Returns an empty // string if not set. func (c EnvConfig) getRegion(ctx context.Context) (string, bool, error) { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go index 46566e90b62..a8fe2a2bbed 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go @@ -3,4 +3,4 @@ package config // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.27.11" +const goModuleVersion = "1.27.27" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go index 06596c1b7c8..5f643977b00 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go @@ -215,6 +215,8 @@ type LoadOptions struct { // Whether S3 Express auth is disabled. S3DisableExpressAuth *bool + + AccountIDEndpointMode aws.AccountIDEndpointMode } func (o LoadOptions) getDefaultsMode(ctx context.Context) (aws.DefaultsMode, bool, error) { @@ -278,6 +280,10 @@ func (o LoadOptions) getRequestMinCompressSizeBytes(ctx context.Context) (int64, return *o.RequestMinCompressSizeBytes, true, nil } +func (o LoadOptions) getAccountIDEndpointMode(ctx context.Context) (aws.AccountIDEndpointMode, bool, error) { + return o.AccountIDEndpointMode, len(o.AccountIDEndpointMode) > 0, nil +} + // WithRegion is a helper function to construct functional options // that sets Region on config's LoadOptions. Setting the region to // an empty string, will result in the region value being ignored. @@ -323,6 +329,17 @@ func WithRequestMinCompressSizeBytes(RequestMinCompressSizeBytes *int64) LoadOpt } } +// WithAccountIDEndpointMode is a helper function to construct functional options +// that sets AccountIDEndpointMode on config's LoadOptions +func WithAccountIDEndpointMode(m aws.AccountIDEndpointMode) LoadOptionsFunc { + return func(o *LoadOptions) error { + if m != "" { + o.AccountIDEndpointMode = m + } + return nil + } +} + // getDefaultRegion returns DefaultRegion from config's LoadOptions func (o LoadOptions) getDefaultRegion(ctx context.Context) (string, bool, error) { if len(o.DefaultRegion) == 0 { @@ -824,7 +841,14 @@ func (o LoadOptions) getEndpointResolver(ctx context.Context) (aws.EndpointResol // the EndpointResolver value is ignored. If multiple WithEndpointResolver calls // are made, the last call overrides the previous call values. // -// Deprecated: See WithEndpointResolverWithOptions +// Deprecated: The global endpoint resolution interface is deprecated. 
The API +// for endpoint resolution is now unique to each service and is set via the +// EndpointResolverV2 field on service client options. Use of +// WithEndpointResolver or WithEndpointResolverWithOptions will prevent you +// from using any endpoint-related service features released after the +// introduction of EndpointResolverV2. You may also encounter broken or +// unexpected behavior when using the old global interface with services that +// use many endpoint-related customizations such as S3. func WithEndpointResolver(v aws.EndpointResolver) LoadOptionsFunc { return func(o *LoadOptions) error { o.EndpointResolver = v @@ -844,6 +868,9 @@ func (o LoadOptions) getEndpointResolverWithOptions(ctx context.Context) (aws.En // that sets the EndpointResolverWithOptions on LoadOptions. If the EndpointResolverWithOptions is set to nil, // the EndpointResolver value is ignored. If multiple WithEndpointResolver calls // are made, the last call overrides the previous call values. +// +// Deprecated: The global endpoint resolution interface is deprecated. See +// deprecation docs on [WithEndpointResolver]. func WithEndpointResolverWithOptions(v aws.EndpointResolverWithOptions) LoadOptionsFunc { return func(o *LoadOptions) error { o.EndpointResolverWithOptions = v diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go index 13745fc98fd..043781f1f77 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go @@ -225,6 +225,23 @@ func getRequestMinCompressSizeBytes(ctx context.Context, configs configs) (value return } +// accountIDEndpointModeProvider provides access to the AccountIDEndpointMode +type accountIDEndpointModeProvider interface { + getAccountIDEndpointMode(context.Context) (aws.AccountIDEndpointMode, bool, error) +} + +func getAccountIDEndpointMode(ctx context.Context, configs configs) (value aws.AccountIDEndpointMode, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(accountIDEndpointModeProvider); ok { + value, found, err = p.getAccountIDEndpointMode(ctx) + if err != nil || found { + break + } + } + } + return +} + // ec2IMDSRegionProvider provides access to the ec2 imds region // configuration value type ec2IMDSRegionProvider interface { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go index fde2e3980e0..41009c7da06 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go @@ -166,6 +166,22 @@ func resolveRequestMinCompressSizeBytes(ctx context.Context, cfg *aws.Config, co return nil } +// resolveAccountIDEndpointMode extracts the AccountIDEndpointMode from the configs slice's +// SharedConfig or EnvConfig +func resolveAccountIDEndpointMode(ctx context.Context, cfg *aws.Config, configs configs) error { + m, found, err := getAccountIDEndpointMode(ctx, configs) + if err != nil { + return err + } + + if !found { + m = aws.AccountIDEndpointModePreferred + } + + cfg.AccountIDEndpointMode = m + return nil +} + // resolveDefaultRegion extracts the first instance of a default region and sets `aws.Config.Region` to the default // region if region had not been resolved from other sources. 
func resolveDefaultRegion(ctx context.Context, cfg *aws.Config, configs configs) error { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go index c546cb7d0f5..d7a2b5307ea 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go @@ -115,6 +115,9 @@ const ( requestMinCompressionSizeBytes = "request_min_compression_size_bytes" s3DisableExpressSessionAuthKey = "s3_disable_express_session_auth" + + accountIDKey = "aws_account_id" + accountIDEndpointMode = "account_id_endpoint_mode" ) // defaultSharedConfigProfile allows for swapping the default profile for testing @@ -341,6 +344,8 @@ type SharedConfig struct { // will only bypass the modified endpoint routing and signing behaviors // associated with the feature. S3DisableExpressAuth *bool + + AccountIDEndpointMode aws.AccountIDEndpointMode } func (c SharedConfig) getDefaultsMode(ctx context.Context) (value aws.DefaultsMode, ok bool, err error) { @@ -1124,12 +1129,17 @@ func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) er return fmt.Errorf("failed to load %s from shared config, %w", requestMinCompressionSizeBytes, err) } + if err := updateAIDEndpointMode(&c.AccountIDEndpointMode, section, accountIDEndpointMode); err != nil { + return fmt.Errorf("failed to load %s from shared config, %w", accountIDEndpointMode, err) + } + // Shared Credentials creds := aws.Credentials{ AccessKeyID: section.String(accessKeyIDKey), SecretAccessKey: section.String(secretAccessKey), SessionToken: section.String(sessionTokenKey), Source: fmt.Sprintf("SharedConfigCredentials: %s", section.SourceFile[accessKeyIDKey]), + AccountID: section.String(accountIDKey), } if creds.HasKeys() { @@ -1177,6 +1187,26 @@ func updateDisableRequestCompression(disable **bool, sec ini.Section, key string return nil } +func updateAIDEndpointMode(m *aws.AccountIDEndpointMode, sec ini.Section, key string) error { + if !sec.Has(key) { + return nil + } + + v := sec.String(key) + switch v { + case "preferred": + *m = aws.AccountIDEndpointModePreferred + case "required": + *m = aws.AccountIDEndpointModeRequired + case "disabled": + *m = aws.AccountIDEndpointModeDisabled + default: + return fmt.Errorf("invalid value for shared config profile field, %s=%s, must be preferred/required/disabled", key, v) + } + + return nil +} + func (c SharedConfig) getRequestMinCompressSizeBytes(ctx context.Context) (int64, bool, error) { if c.RequestMinCompressSizeBytes == nil { return 0, false, nil @@ -1191,6 +1221,10 @@ func (c SharedConfig) getDisableRequestCompression(ctx context.Context) (bool, b return *c.DisableRequestCompression, true, nil } +func (c SharedConfig) getAccountIDEndpointMode(ctx context.Context) (aws.AccountIDEndpointMode, bool, error) { + return c.AccountIDEndpointMode, len(c.AccountIDEndpointMode) > 0, nil +} + func updateDefaultsMode(mode *aws.DefaultsMode, section ini.Section, key string) error { if !section.Has(key) { return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md index 3b0bad426ca..66b7868161d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md @@ -1,3 +1,67 @@ +# v1.17.27 (2024-07-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.26 (2024-07-10.2) + +* **Dependency Update**: 
Updated to the latest SDK module versions + +# v1.17.25 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.24 (2024-07-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.23 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.22 (2024-06-26) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.21 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.20 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.19 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.18 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.17 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.16 (2024-05-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.15 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.14 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.13 (2024-05-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.12 (2024-05-08) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.17.11 (2024-04-05) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go index 9a869f89547..dc291c97cd7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go @@ -128,6 +128,7 @@ type GetCredentialsOutput struct { AccessKeyID string SecretAccessKey string Token string + AccountID string } // EndpointError is an error returned from the endpoint service diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go index 0c3c4d68266..2386153a9ec 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go @@ -152,6 +152,7 @@ func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { SecretAccessKey: resp.SecretAccessKey, SessionToken: resp.Token, Source: ProviderName, + AccountID: resp.AccountID, } if resp.Expiration != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go index 4cb3e3039ff..aaed530a261 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go @@ -3,4 +3,4 @@ package credentials // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.17.11" +const goModuleVersion = "1.17.27" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go index fe9345e287c..911fcc32729 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go @@ -167,6 +167,9 @@ type CredentialProcessResponse struct { // The date on which the current credentials expire. Expiration *time.Time + + // The ID of the account for credentials + AccountID string `json:"AccountId"` } // Retrieve executes the credential process command and returns the @@ -208,6 +211,7 @@ func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { AccessKeyID: resp.AccessKeyID, SecretAccessKey: resp.SecretAccessKey, SessionToken: resp.SessionToken, + AccountID: resp.AccountID, } // Handle expiration diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go index b3cf7853e76..8c230be8eb8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go @@ -129,6 +129,7 @@ func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { CanExpire: true, Expires: time.Unix(0, output.RoleCredentials.Expiration*int64(time.Millisecond)).UTC(), Source: ProviderName, + AccountID: p.options.AccountID, }, nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go index 289707b6de4..4c7f7993f54 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go @@ -308,6 +308,11 @@ func (p *AssumeRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, err return aws.Credentials{Source: ProviderName}, err } + var accountID string + if resp.AssumedRoleUser != nil { + accountID = getAccountID(resp.AssumedRoleUser) + } + return aws.Credentials{ AccessKeyID: *resp.Credentials.AccessKeyId, SecretAccessKey: *resp.Credentials.SecretAccessKey, @@ -316,5 +321,6 @@ func (p *AssumeRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, err CanExpire: true, Expires: *resp.Credentials.Expiration, + AccountID: accountID, }, nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go index ddaf6df6ce1..b4b71970862 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go @@ -5,6 +5,7 @@ import ( "fmt" "io/ioutil" "strconv" + "strings" "time" "github.com/aws/aws-sdk-go-v2/aws" @@ -135,6 +136,11 @@ func (p *WebIdentityRoleProvider) Retrieve(ctx context.Context) (aws.Credentials return aws.Credentials{}, fmt.Errorf("failed to retrieve credentials, %w", err) } + var accountID string + if resp.AssumedRoleUser != nil { + accountID = getAccountID(resp.AssumedRoleUser) + } + // InvalidIdentityToken error is a temporary error that can occur // when assuming an Role with a JWT web identity token. 
@@ -145,6 +151,19 @@ func (p *WebIdentityRoleProvider) Retrieve(ctx context.Context) (aws.Credentials Source: WebIdentityProviderName, CanExpire: true, Expires: *resp.Credentials.Expiration, + AccountID: accountID, } return value, nil } + +// extract accountID from arn with format "arn:partition:service:region:account-id:[resource-section]" +func getAccountID(u *types.AssumedRoleUser) string { + if u.Arn == nil { + return "" + } + parts := strings.Split(*u.Arn, ":") + if len(parts) < 5 { + return "" + } + return parts[4] +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md index 3807833dd43..3584159df8e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md @@ -1,3 +1,43 @@ +# v1.16.11 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.10 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.9 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.8 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.7 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.6 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.5 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.4 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.3 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.2 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.16.1 (2024-03-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go index 5642306f87b..1cd7560633a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go @@ -3,4 +3,4 @@ package imds // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.16.1" +const goModuleVersion = "1.16.11" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go index 0c5a2d40c9f..24db8e144cb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go @@ -5,6 +5,7 @@ import ( "fmt" v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" "github.com/aws/aws-sdk-go-v2/internal/sdk" "github.com/aws/smithy-go" "github.com/aws/smithy-go/auth" @@ -39,7 +40,10 @@ func (v *V4SignerAdapter) SignRequest(ctx context.Context, r *smithyhttp.Request } hash := v4.GetPayloadHash(ctx) - err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, region, sdk.NowTime(), func(o *v4.SignerOptions) { + signingTime := sdk.NowTime() + skew := internalcontext.GetAttemptSkewContext(ctx) + signingTime = signingTime.Add(skew) + err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, region, signingTime, func(o 
*v4.SignerOptions) { o.DisableURIPathEscaping, _ = smithyhttp.GetDisableDoubleEncoding(&props) o.Logger = v.Logger diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/path_value.go deleted file mode 100644 index 58ef438a196..00000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/path_value.go +++ /dev/null @@ -1,225 +0,0 @@ -package awsutil - -import ( - "reflect" - "regexp" - "strconv" - "strings" - - "github.com/jmespath/go-jmespath" -) - -var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) - -// rValuesAtPath returns a slice of values found in value v. The values -// in v are explored recursively so all nested values are collected. -func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { - pathparts := strings.Split(path, "||") - if len(pathparts) > 1 { - for _, pathpart := range pathparts { - vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) - if len(vals) > 0 { - return vals - } - } - return nil - } - - values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} - components := strings.Split(path, ".") - for len(values) > 0 && len(components) > 0 { - var index *int64 - var indexStar bool - c := strings.TrimSpace(components[0]) - if c == "" { // no actual component, illegal syntax - return nil - } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { - // TODO normalize case for user - return nil // don't support unexported fields - } - - // parse this component - if m := indexRe.FindStringSubmatch(c); m != nil { - c = m[1] - if m[2] == "" { - index = nil - indexStar = true - } else { - i, _ := strconv.ParseInt(m[2], 10, 32) - index = &i - indexStar = false - } - } - - nextvals := []reflect.Value{} - for _, value := range values { - // pull component name out of struct member - if value.Kind() != reflect.Struct { - continue - } - - if c == "*" { // pull all members - for i := 0; i < value.NumField(); i++ { - if f := reflect.Indirect(value.Field(i)); f.IsValid() { - nextvals = append(nextvals, f) - } - } - continue - } - - value = value.FieldByNameFunc(func(name string) bool { - if c == name { - return true - } else if !caseSensitive && strings.EqualFold(name, c) { - return true - } - return false - }) - - if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { - if !value.IsNil() { - value.Set(reflect.Zero(value.Type())) - } - return []reflect.Value{value} - } - - if createPath && value.Kind() == reflect.Ptr && value.IsNil() { - // TODO if the value is the terminus it should not be created - // if the value to be set to its position is nil. 
- value.Set(reflect.New(value.Type().Elem())) - value = value.Elem() - } else { - value = reflect.Indirect(value) - } - - if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { - if !createPath && value.IsNil() { - value = reflect.ValueOf(nil) - } - } - - if value.IsValid() { - nextvals = append(nextvals, value) - } - } - values = nextvals - - if indexStar || index != nil { - nextvals = []reflect.Value{} - for _, valItem := range values { - value := reflect.Indirect(valItem) - if value.Kind() != reflect.Slice { - continue - } - - if indexStar { // grab all indices - for i := 0; i < value.Len(); i++ { - idx := reflect.Indirect(value.Index(i)) - if idx.IsValid() { - nextvals = append(nextvals, idx) - } - } - continue - } - - // pull out index - i := int(*index) - if i >= value.Len() { // check out of bounds - if createPath { - // TODO resize slice - } else { - continue - } - } else if i < 0 { // support negative indexing - i = value.Len() + i - } - value = reflect.Indirect(value.Index(i)) - - if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { - if !createPath && value.IsNil() { - value = reflect.ValueOf(nil) - } - } - - if value.IsValid() { - nextvals = append(nextvals, value) - } - } - values = nextvals - } - - components = components[1:] - } - return values -} - -// ValuesAtPath returns a list of values at the case insensitive lexical -// path inside of a structure. -func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { - result, err := jmespath.Search(path, i) - if err != nil { - return nil, err - } - - v := reflect.ValueOf(result) - if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { - return nil, nil - } - if s, ok := result.([]interface{}); ok { - return s, err - } - if v.Kind() == reflect.Map && v.Len() == 0 { - return nil, nil - } - if v.Kind() == reflect.Slice { - out := make([]interface{}, v.Len()) - for i := 0; i < v.Len(); i++ { - out[i] = v.Index(i).Interface() - } - return out, nil - } - - return []interface{}{result}, nil -} - -// SetValueAtPath sets a value at the case insensitive lexical path inside -// of a structure. 
-func SetValueAtPath(i interface{}, path string, v interface{}) { - if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil { - for _, rval := range rvals { - if rval.Kind() == reflect.Ptr && rval.IsNil() { - continue - } - setValue(rval, v) - } - } -} - -func setValue(dstVal reflect.Value, src interface{}) { - if dstVal.Kind() == reflect.Ptr { - dstVal = reflect.Indirect(dstVal) - } - srcVal := reflect.ValueOf(src) - - if !srcVal.IsValid() { // src is literal nil - if dstVal.CanAddr() { - // Convert to pointer so that pointer's value can be nil'ed - // dstVal = dstVal.Addr() - } - dstVal.Set(reflect.Zero(dstVal.Type())) - - } else if srcVal.Kind() == reflect.Ptr { - if srcVal.IsNil() { - srcVal = reflect.Zero(dstVal.Type()) - } else { - srcVal = reflect.ValueOf(src).Elem() - } - dstVal.Set(srcVal) - } else { - if dstVal.Kind() == reflect.String { - dstVal.SetString(srcVal.String()) - } else { - dstVal.Set(srcVal) - } - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md index 72e196dd9ea..3c1d846e03d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md @@ -1,3 +1,43 @@ +# v1.3.15 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.14 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.13 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.12 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.11 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.10 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.9 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.8 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.7 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.6 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.3.5 (2024-03-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go index faf71cac3be..7926a49c244 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go @@ -3,4 +3,4 @@ package configsources // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.3.5" +const goModuleVersion = "1.3.15" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go new file mode 100644 index 00000000000..f0c283d3942 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go @@ -0,0 +1,52 @@ +package context + +import ( + "context" + "time" + + "github.com/aws/smithy-go/middleware" +) + +type s3BackendKey struct{} +type checksumInputAlgorithmKey struct{} +type clockSkew struct{} + +const ( + // S3BackendS3Express identifies the S3Express backend + S3BackendS3Express = "S3Express" +) + +// SetS3Backend stores the resolved 
endpoint backend within the request +// context, which is required for a variety of custom S3 behaviors. +func SetS3Backend(ctx context.Context, typ string) context.Context { + return middleware.WithStackValue(ctx, s3BackendKey{}, typ) +} + +// GetS3Backend retrieves the stored endpoint backend within the context. +func GetS3Backend(ctx context.Context) string { + v, _ := middleware.GetStackValue(ctx, s3BackendKey{}).(string) + return v +} + +// SetChecksumInputAlgorithm sets the request checksum algorithm on the +// context. +func SetChecksumInputAlgorithm(ctx context.Context, value string) context.Context { + return middleware.WithStackValue(ctx, checksumInputAlgorithmKey{}, value) +} + +// GetChecksumInputAlgorithm returns the checksum algorithm from the context. +func GetChecksumInputAlgorithm(ctx context.Context) string { + v, _ := middleware.GetStackValue(ctx, checksumInputAlgorithmKey{}).(string) + return v +} + +// SetAttemptSkewContext sets the clock skew value on the context +func SetAttemptSkewContext(ctx context.Context, v time.Duration) context.Context { + return middleware.WithStackValue(ctx, clockSkew{}, v) +} + +// GetAttemptSkewContext gets the clock skew value from the context +func GetAttemptSkewContext(ctx context.Context) time.Duration { + x, _ := middleware.GetStackValue(ctx, clockSkew{}).(time.Duration) + return x +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go index ba6032758a5..91414afe81c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go @@ -12,11 +12,12 @@ type Partition struct { // PartitionConfig provides the endpoint metadata for an AWS region or partition. 
type PartitionConfig struct { - Name string `json:"name"` - DnsSuffix string `json:"dnsSuffix"` - DualStackDnsSuffix string `json:"dualStackDnsSuffix"` - SupportsFIPS bool `json:"supportsFIPS"` - SupportsDualStack bool `json:"supportsDualStack"` + Name string `json:"name"` + DnsSuffix string `json:"dnsSuffix"` + DualStackDnsSuffix string `json:"dualStackDnsSuffix"` + SupportsFIPS bool `json:"supportsFIPS"` + SupportsDualStack bool `json:"supportsDualStack"` + ImplicitGlobalRegion string `json:"implicitGlobalRegion"` } type RegionOverrides struct { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go index 849beffd7da..5f0779997de 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go @@ -13,11 +13,12 @@ var partitions = []Partition{ ID: "aws", RegionRegex: "^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws", - DnsSuffix: "amazonaws.com", - DualStackDnsSuffix: "api.aws", - SupportsFIPS: true, - SupportsDualStack: true, + Name: "aws", + DnsSuffix: "amazonaws.com", + DualStackDnsSuffix: "api.aws", + SupportsFIPS: true, + SupportsDualStack: true, + ImplicitGlobalRegion: "us-east-1", }, Regions: map[string]RegionOverrides{ "af-south-1": { @@ -111,6 +112,13 @@ var partitions = []Partition{ SupportsFIPS: nil, SupportsDualStack: nil, }, + "ca-west-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, "eu-central-1": { Name: nil, DnsSuffix: nil, @@ -229,11 +237,12 @@ var partitions = []Partition{ ID: "aws-cn", RegionRegex: "^cn\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws-cn", - DnsSuffix: "amazonaws.com.cn", - DualStackDnsSuffix: "api.amazonwebservices.com.cn", - SupportsFIPS: true, - SupportsDualStack: true, + Name: "aws-cn", + DnsSuffix: "amazonaws.com.cn", + DualStackDnsSuffix: "api.amazonwebservices.com.cn", + SupportsFIPS: true, + SupportsDualStack: true, + ImplicitGlobalRegion: "cn-northwest-1", }, Regions: map[string]RegionOverrides{ "aws-cn-global": { @@ -263,11 +272,12 @@ var partitions = []Partition{ ID: "aws-us-gov", RegionRegex: "^us\\-gov\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws-us-gov", - DnsSuffix: "amazonaws.com", - DualStackDnsSuffix: "api.aws", - SupportsFIPS: true, - SupportsDualStack: true, + Name: "aws-us-gov", + DnsSuffix: "amazonaws.com", + DualStackDnsSuffix: "api.aws", + SupportsFIPS: true, + SupportsDualStack: true, + ImplicitGlobalRegion: "us-gov-west-1", }, Regions: map[string]RegionOverrides{ "aws-us-gov-global": { @@ -297,11 +307,12 @@ var partitions = []Partition{ ID: "aws-iso", RegionRegex: "^us\\-iso\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws-iso", - DnsSuffix: "c2s.ic.gov", - DualStackDnsSuffix: "c2s.ic.gov", - SupportsFIPS: true, - SupportsDualStack: false, + Name: "aws-iso", + DnsSuffix: "c2s.ic.gov", + DualStackDnsSuffix: "c2s.ic.gov", + SupportsFIPS: true, + SupportsDualStack: false, + ImplicitGlobalRegion: "us-iso-east-1", }, Regions: map[string]RegionOverrides{ "aws-iso-global": { @@ -331,11 +342,12 @@ var partitions = []Partition{ ID: "aws-iso-b", RegionRegex: "^us\\-isob\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws-iso-b", - DnsSuffix: "sc2s.sgov.gov", - DualStackDnsSuffix: "sc2s.sgov.gov", - SupportsFIPS: true, - SupportsDualStack: false, + 
Name: "aws-iso-b", + DnsSuffix: "sc2s.sgov.gov", + DualStackDnsSuffix: "sc2s.sgov.gov", + SupportsFIPS: true, + SupportsDualStack: false, + ImplicitGlobalRegion: "us-isob-east-1", }, Regions: map[string]RegionOverrides{ "aws-iso-b-global": { @@ -358,23 +370,33 @@ var partitions = []Partition{ ID: "aws-iso-e", RegionRegex: "^eu\\-isoe\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws-iso-e", - DnsSuffix: "cloud.adc-e.uk", - DualStackDnsSuffix: "cloud.adc-e.uk", - SupportsFIPS: true, - SupportsDualStack: false, + Name: "aws-iso-e", + DnsSuffix: "cloud.adc-e.uk", + DualStackDnsSuffix: "cloud.adc-e.uk", + SupportsFIPS: true, + SupportsDualStack: false, + ImplicitGlobalRegion: "eu-isoe-west-1", + }, + Regions: map[string]RegionOverrides{ + "eu-isoe-west-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, }, - Regions: map[string]RegionOverrides{}, }, { ID: "aws-iso-f", RegionRegex: "^us\\-isof\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws-iso-f", - DnsSuffix: "csp.hci.ic.gov", - DualStackDnsSuffix: "csp.hci.ic.gov", - SupportsFIPS: true, - SupportsDualStack: false, + Name: "aws-iso-f", + DnsSuffix: "csp.hci.ic.gov", + DualStackDnsSuffix: "csp.hci.ic.gov", + SupportsFIPS: true, + SupportsDualStack: false, + ImplicitGlobalRegion: "us-isof-south-1", }, Regions: map[string]RegionOverrides{}, }, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json index f376f6908aa..7a28569c3de 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json @@ -198,7 +198,11 @@ "supportsFIPS" : true }, "regionRegex" : "^eu\\-isoe\\-\\w+\\-\\d+$", - "regions" : { } + "regions" : { + "eu-isoe-west-1" : { + "description" : "EU ISOE West" + } + } }, { "id" : "aws-iso-f", "outputs" : { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md index 6f6dafa8d19..549df6013c6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md @@ -1,3 +1,43 @@ +# v2.6.15 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.14 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.13 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.12 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.11 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.10 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.9 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.8 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.7 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.6 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + # v2.6.5 (2024-03-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go 
b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go index 279816314e9..dcb5a4b93be 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go @@ -3,4 +3,4 @@ package endpoints // goModuleVersion is the tagged release for this module -const goModuleVersion = "2.6.5" +const goModuleVersion = "2.6.15" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go new file mode 100644 index 00000000000..8e24a3f0a47 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go @@ -0,0 +1,42 @@ +package middleware + +import ( + "context" + "sync/atomic" + "time" + + internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" + "github.com/aws/smithy-go/middleware" +) + +// AddTimeOffsetMiddleware sets a value representing clock skew on the request context. +// This can be read by other operations (such as signing) to correct the date value they send +// on the request +type AddTimeOffsetMiddleware struct { + Offset *atomic.Int64 +} + +// ID the identifier for AddTimeOffsetMiddleware +func (m *AddTimeOffsetMiddleware) ID() string { return "AddTimeOffsetMiddleware" } + +// HandleBuild sets a value for attemptSkew on the request context if one is set on the client. +func (m AddTimeOffsetMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + if m.Offset != nil { + offset := time.Duration(m.Offset.Load()) + ctx = internalcontext.SetAttemptSkewContext(ctx, offset) + } + return next.HandleBuild(ctx, in) +} + +// HandleDeserialize gets the clock skew context from the context, and if set, sets it on the pointer +// held by AddTimeOffsetMiddleware +func (m *AddTimeOffsetMiddleware) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + if v := internalcontext.GetAttemptSkewContext(ctx); v != 0 { + m.Offset.Store(v.Nanoseconds()) + } + return next.HandleDeserialize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/CHANGELOG.md index c1c52b22105..223ee4a7604 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/CHANGELOG.md @@ -1,3 +1,57 @@ +# v1.27.3 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.2 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.1 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.0 (2024-06-26) + +* **Feature**: Support list-of-string endpoint parameter. + +# v1.26.1 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.0 (2024-06-18) + +* **Feature**: Track usage of various AWS SDK features in user-agent string. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.11 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.10 (2024-06-07) + +* **Bug Fix**: Add clock skew correction on all service clients +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.9 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.8 (2024-05-23) + +* No change notes available for this release. + +# v1.25.7 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.6 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.5 (2024-05-08) + +* **Bug Fix**: GoDoc improvement + # v1.25.4 (2024-03-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_client.go index 88691bdd9d6..7039e376cf7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_client.go @@ -15,7 +15,9 @@ import ( internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" smithydocument "github.com/aws/smithy-go/document" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/middleware" @@ -23,6 +25,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" "net" "net/http" + "sync/atomic" "time" ) @@ -33,6 +36,9 @@ const ServiceAPIVersion = "2020-08-01" // Service. type Client struct { options Options + + // Difference between the time reported by the server and the client + timeOffset *atomic.Int64 } // New returns an initialized Client based on the functional options. Provide @@ -73,6 +79,8 @@ func New(options Options, optFns ...func(*Options)) *Client { options: options, } + initializeTimeOffsetResolver(client) + return client } @@ -234,15 +242,16 @@ func setResolvedDefaultsMode(o *Options) { // NewFromConfig returns a new client from the provided config. 
func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + AccountIDEndpointMode: cfg.AccountIDEndpointMode, } resolveAWSRetryerProvider(cfg, &opts) resolveAWSRetryMaxAttempts(cfg, &opts) @@ -446,6 +455,30 @@ func addContentSHA256Header(stack *middleware.Stack) error { return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) } +func addIsWaiterUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) + return nil + }) +} + +func addIsPaginatorUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) + return nil + }) +} + func resolveIdempotencyTokenProvider(o *Options) { if o.IdempotencyTokenProvider != nil { return @@ -496,6 +529,63 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { return nil } +func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { + if mode == aws.AccountIDEndpointModeDisabled { + return nil + } + + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { + return aws.String(ca.Credentials.AccountID) + } + + return nil +} + +func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { + mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} + if err := stack.Build.Add(&mw, middleware.After); err != nil { + return err + } + return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) +} +func initializeTimeOffsetResolver(c *Client) { + c.timeOffset = new(atomic.Int64) +} + +func checkAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) error { + switch mode { + case aws.AccountIDEndpointModeUnset: + case aws.AccountIDEndpointModePreferred: + case aws.AccountIDEndpointModeDisabled: + case aws.AccountIDEndpointModeRequired: + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); !ok { + return fmt.Errorf("accountID is required but not set") + } else if ca.Credentials.AccountID == "" { + return fmt.Errorf("accountID is required but not set") + } + // default check in case invalid mode is configured through request config + default: + return fmt.Errorf("invalid accountID endpoint mode %s, must be preferred/required/disabled", mode) + } + + return nil +} + +func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + switch options.Retryer.(type) { + case *retry.Standard: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) + case *retry.AdaptiveMode: + 
ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) + } + return nil +} + // IdempotencyTokenProvider interface for providing idempotency token type IdempotencyTokenProvider interface { GetIdempotencyToken() (string, error) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_CreateAlertManagerDefinition.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_CreateAlertManagerDefinition.go index 8819178714c..a2ae41474fe 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_CreateAlertManagerDefinition.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_CreateAlertManagerDefinition.go @@ -33,9 +33,11 @@ func (c *Client) CreateAlertManagerDefinition(ctx context.Context, params *Creat type CreateAlertManagerDefinitionInput struct { // The alert manager definition to add. A base64-encoded version of the YAML alert - // manager definition file. For details about the alert manager definition, see - // AlertManagedDefinitionData (https://docs.aws.amazon.com/prometheus/latest/APIReference/yaml-AlertManagerDefinitionData.html) - // . + // manager definition file. + // + // For details about the alert manager definition, see [AlertManagedDefinitionData]. + // + // [AlertManagedDefinitionData]: https://docs.aws.amazon.com/prometheus/latest/APIReference/yaml-AlertManagerDefinitionData.html // // This member is required. Data []byte @@ -121,6 +123,12 @@ func (c *Client) addOperationCreateAlertManagerDefinitionMiddlewares(stack *midd if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateAlertManagerDefinitionMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_CreateLoggingConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_CreateLoggingConfiguration.go index 396c66b4c8f..bf437ebc54d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_CreateLoggingConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_CreateLoggingConfiguration.go @@ -119,6 +119,12 @@ func (c *Client) addOperationCreateLoggingConfigurationMiddlewares(stack *middle if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateLoggingConfigurationMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_CreateRuleGroupsNamespace.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_CreateRuleGroupsNamespace.go index 600c04e28f6..c2af92ac6f6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_CreateRuleGroupsNamespace.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_CreateRuleGroupsNamespace.go @@ -13,9 +13,10 @@ import ( // The CreateRuleGroupsNamespace operation creates a rule groups namespace within // a workspace. A rule groups namespace is associated with exactly one rules file. -// A workspace can have multiple rule groups namespaces. Use this operation only to -// create new rule groups namespaces. To update an existing rule groups namespace, -// use PutRuleGroupsNamespace . 
+// A workspace can have multiple rule groups namespaces. +// +// Use this operation only to create new rule groups namespaces. To update an +// existing rule groups namespace, use PutRuleGroupsNamespace . func (c *Client) CreateRuleGroupsNamespace(ctx context.Context, params *CreateRuleGroupsNamespaceInput, optFns ...func(*Options)) (*CreateRuleGroupsNamespaceOutput, error) { if params == nil { params = &CreateRuleGroupsNamespaceInput{} @@ -34,10 +35,13 @@ func (c *Client) CreateRuleGroupsNamespace(ctx context.Context, params *CreateRu // Represents the input of a CreateRuleGroupsNamespace operation. type CreateRuleGroupsNamespaceInput struct { - // The rules file to use in the new namespace. Contains the base64-encoded version - // of the YAML rules file. For details about the rule groups namespace structure, - // see RuleGroupsNamespaceData (https://docs.aws.amazon.com/prometheus/latest/APIReference/yaml-RuleGroupsNamespaceData.html) - // . + // The rules file to use in the new namespace. + // + // Contains the base64-encoded version of the YAML rules file. + // + // For details about the rule groups namespace structure, see [RuleGroupsNamespaceData]. + // + // [RuleGroupsNamespaceData]: https://docs.aws.amazon.com/prometheus/latest/APIReference/yaml-RuleGroupsNamespaceData.html // // This member is required. Data []byte @@ -144,6 +148,12 @@ func (c *Client) addOperationCreateRuleGroupsNamespaceMiddlewares(stack *middlew if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateRuleGroupsNamespaceMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_CreateScraper.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_CreateScraper.go index 72471fbde92..5f4c4487ba0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_CreateScraper.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_CreateScraper.go @@ -15,17 +15,24 @@ import ( // pulls metrics from Prometheus-compatible sources within an Amazon EKS cluster, // and sends them to your Amazon Managed Service for Prometheus workspace. You can // configure the scraper to control what metrics are collected, and what -// transformations are applied prior to sending them to your workspace. If needed, -// an IAM role will be created for you that gives Amazon Managed Service for -// Prometheus access to the metrics in your cluster. For more information, see -// Using roles for scraping metrics from EKS (https://docs.aws.amazon.com/prometheus/latest/userguide/using-service-linked-roles.html#using-service-linked-roles-prom-scraper) -// in the Amazon Managed Service for Prometheus User Guide. You cannot update a -// scraper. If you want to change the configuration of the scraper, create a new -// scraper and delete the old one. The scrapeConfiguration parameter contains the -// base64-encoded version of the YAML configuration file. For more information -// about collectors, including what metrics are collected, and how to configure the -// scraper, see Amazon Web Services managed collectors (https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-collector.html) -// in the Amazon Managed Service for Prometheus User Guide. +// transformations are applied prior to sending them to your workspace. 
+// +// If needed, an IAM role will be created for you that gives Amazon Managed +// Service for Prometheus access to the metrics in your cluster. For more +// information, see [Using roles for scraping metrics from EKS]in the Amazon Managed Service for Prometheus User Guide. +// +// You cannot update a scraper. If you want to change the configuration of the +// scraper, create a new scraper and delete the old one. +// +// The scrapeConfiguration parameter contains the base64-encoded version of the +// YAML configuration file. +// +// For more information about collectors, including what metrics are collected, +// and how to configure the scraper, see [Amazon Web Services managed collectors]in the Amazon Managed Service for +// Prometheus User Guide. +// +// [Amazon Web Services managed collectors]: https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-collector.html +// [Using roles for scraping metrics from EKS]: https://docs.aws.amazon.com/prometheus/latest/userguide/using-service-linked-roles.html#using-service-linked-roles-prom-scraper func (c *Client) CreateScraper(ctx context.Context, params *CreateScraperInput, optFns ...func(*Options)) (*CreateScraperOutput, error) { if params == nil { params = &CreateScraperInput{} @@ -49,8 +56,8 @@ type CreateScraperInput struct { // This member is required. Destination types.Destination - // The configuration file to use in the new scraper. For more information, see - // Scraper configuration in the Amazon Managed Service for Prometheus User Guide. + // The configuration file to use in the new scraper. For more information, see Scraper configuration in + // the Amazon Managed Service for Prometheus User Guide. // // This member is required. ScrapeConfiguration types.ScrapeConfiguration @@ -156,6 +163,12 @@ func (c *Client) addOperationCreateScraperMiddlewares(stack *middleware.Stack, o if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateScraperMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_CreateWorkspace.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_CreateWorkspace.go index f59b866f0a9..97fc8a74ad5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_CreateWorkspace.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_CreateWorkspace.go @@ -33,8 +33,10 @@ func (c *Client) CreateWorkspace(ctx context.Context, params *CreateWorkspaceInp type CreateWorkspaceInput struct { // An alias that you assign to this workspace to help you identify it. It does not - // need to be unique. Blank spaces at the beginning or end of the alias that you - // specify will be trimmed from the value used. + // need to be unique. + // + // Blank spaces at the beginning or end of the alias that you specify will be + // trimmed from the value used. Alias *string // A unique identifier that you can provide to ensure the idempotency of the @@ -43,8 +45,9 @@ type CreateWorkspaceInput struct { // (optional) The ARN for a customer managed KMS key to use for encrypting data // within your workspace. 
For more information about using your own key in your - // workspace, see Encryption at rest (https://docs.aws.amazon.com/prometheus/latest/userguide/encryption-at-rest-Amazon-Service-Prometheus.html) - // in the Amazon Managed Service for Prometheus User Guide. + // workspace, see [Encryption at rest]in the Amazon Managed Service for Prometheus User Guide. + // + // [Encryption at rest]: https://docs.aws.amazon.com/prometheus/latest/userguide/encryption-at-rest-Amazon-Service-Prometheus.html KmsKeyArn *string // The list of tag keys and values to associate with the workspace. @@ -140,6 +143,12 @@ func (c *Client) addOperationCreateWorkspaceMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateWorkspaceMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DeleteAlertManagerDefinition.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DeleteAlertManagerDefinition.go index e893a2189ab..57859de333d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DeleteAlertManagerDefinition.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DeleteAlertManagerDefinition.go @@ -103,6 +103,12 @@ func (c *Client) addOperationDeleteAlertManagerDefinitionMiddlewares(stack *midd if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opDeleteAlertManagerDefinitionMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DeleteLoggingConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DeleteLoggingConfiguration.go index 4e98ba52e5d..a793bf4eca3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DeleteLoggingConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DeleteLoggingConfiguration.go @@ -103,6 +103,12 @@ func (c *Client) addOperationDeleteLoggingConfigurationMiddlewares(stack *middle if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opDeleteLoggingConfigurationMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DeleteRuleGroupsNamespace.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DeleteRuleGroupsNamespace.go index ccb4bef4b7b..3272600a093 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DeleteRuleGroupsNamespace.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DeleteRuleGroupsNamespace.go @@ -109,6 +109,12 @@ func (c *Client) addOperationDeleteRuleGroupsNamespaceMiddlewares(stack *middlew if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = 
addIdempotencyToken_opDeleteRuleGroupsNamespaceMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DeleteScraper.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DeleteScraper.go index 1d627005ae1..92dfd8c8e92 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DeleteScraper.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DeleteScraper.go @@ -117,6 +117,12 @@ func (c *Client) addOperationDeleteScraperMiddlewares(stack *middleware.Stack, o if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opDeleteScraperMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DeleteWorkspace.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DeleteWorkspace.go index 48072422c60..3bf93c57543 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DeleteWorkspace.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DeleteWorkspace.go @@ -10,9 +10,10 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes an existing workspace. When you delete a workspace, the data that has -// been ingested into it is not immediately deleted. It will be permanently deleted -// within one month. +// Deletes an existing workspace. +// +// When you delete a workspace, the data that has been ingested into it is not +// immediately deleted. It will be permanently deleted within one month. func (c *Client) DeleteWorkspace(ctx context.Context, params *DeleteWorkspaceInput, optFns ...func(*Options)) (*DeleteWorkspaceOutput, error) { if params == nil { params = &DeleteWorkspaceInput{} @@ -105,6 +106,12 @@ func (c *Client) addOperationDeleteWorkspaceMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opDeleteWorkspaceMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DescribeAlertManagerDefinition.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DescribeAlertManagerDefinition.go index 4c189711f00..7c88f281236 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DescribeAlertManagerDefinition.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DescribeAlertManagerDefinition.go @@ -108,6 +108,12 @@ func (c *Client) addOperationDescribeAlertManagerDefinitionMiddlewares(stack *mi if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDescribeAlertManagerDefinitionValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DescribeLoggingConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DescribeLoggingConfiguration.go index 6c4acd5f42e..82a985974d6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DescribeLoggingConfiguration.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DescribeLoggingConfiguration.go @@ -108,6 +108,12 @@ func (c *Client) addOperationDescribeLoggingConfigurationMiddlewares(stack *midd if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDescribeLoggingConfigurationValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DescribeRuleGroupsNamespace.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DescribeRuleGroupsNamespace.go index 7df18632e9e..7114178f600 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DescribeRuleGroupsNamespace.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DescribeRuleGroupsNamespace.go @@ -113,6 +113,12 @@ func (c *Client) addOperationDescribeRuleGroupsNamespaceMiddlewares(stack *middl if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDescribeRuleGroupsNamespaceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DescribeScraper.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DescribeScraper.go index 1b3f134a926..4f6901c15bf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DescribeScraper.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DescribeScraper.go @@ -12,7 +12,7 @@ import ( smithytime "github.com/aws/smithy-go/time" smithyhttp "github.com/aws/smithy-go/transport/http" smithywaiter "github.com/aws/smithy-go/waiter" - "github.com/jmespath/go-jmespath" + jmespath "github.com/jmespath/go-jmespath" "time" ) @@ -112,6 +112,12 @@ func (c *Client) addOperationDescribeScraperMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDescribeScraperValidationMiddleware(stack); err != nil { return err } @@ -136,14 +142,6 @@ func (c *Client) addOperationDescribeScraperMiddlewares(stack *middleware.Stack, return nil } -// DescribeScraperAPIClient is a client that implements the DescribeScraper -// operation. -type DescribeScraperAPIClient interface { - DescribeScraper(context.Context, *DescribeScraperInput, ...func(*Options)) (*DescribeScraperOutput, error) -} - -var _ DescribeScraperAPIClient = (*Client)(nil) - // ScraperActiveWaiterOptions are waiter options for ScraperActiveWaiter type ScraperActiveWaiterOptions struct { @@ -176,12 +174,13 @@ type ScraperActiveWaiterOptions struct { // Retryable is function that can be used to override the service defined // waiter-behavior based on operation output, or returned error. This function is - // used by the waiter to decide if a state is retryable or a terminal state. By - // default service-modeled logic will populate this option. This option can thus be - // used to define a custom waiter state with fall-back to service-modeled waiter - // state mutators.The function returns an error in case of a failure state. 
In case - // of retry state, this function returns a bool value of true and nil error, while - // in case of success it returns a bool value of false and nil error. + // used by the waiter to decide if a state is retryable or a terminal state. + // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. + // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. Retryable func(context.Context, *DescribeScraperInput, *DescribeScraperOutput, error) (bool, error) } @@ -258,7 +257,13 @@ func (w *ScraperActiveWaiter) WaitForOutput(ctx context.Context, params *Describ } out, err := w.client.DescribeScraper(ctx, params, func(o *Options) { + baseOpts := []func(*Options){ + addIsWaiterUserAgent, + } o.APIOptions = append(o.APIOptions, apiOptions...) + for _, opt := range baseOpts { + opt(o) + } for _, opt := range options.ClientOptions { opt(o) } @@ -365,12 +370,13 @@ type ScraperDeletedWaiterOptions struct { // Retryable is function that can be used to override the service defined // waiter-behavior based on operation output, or returned error. This function is - // used by the waiter to decide if a state is retryable or a terminal state. By - // default service-modeled logic will populate this option. This option can thus be - // used to define a custom waiter state with fall-back to service-modeled waiter - // state mutators.The function returns an error in case of a failure state. In case - // of retry state, this function returns a bool value of true and nil error, while - // in case of success it returns a bool value of false and nil error. + // used by the waiter to decide if a state is retryable or a terminal state. + // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. + // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. Retryable func(context.Context, *DescribeScraperInput, *DescribeScraperOutput, error) (bool, error) } @@ -447,7 +453,13 @@ func (w *ScraperDeletedWaiter) WaitForOutput(ctx context.Context, params *Descri } out, err := w.client.DescribeScraper(ctx, params, func(o *Options) { + baseOpts := []func(*Options){ + addIsWaiterUserAgent, + } o.APIOptions = append(o.APIOptions, apiOptions...) + for _, opt := range baseOpts { + opt(o) + } for _, opt := range options.ClientOptions { opt(o) } @@ -512,6 +524,14 @@ func scraperDeletedStateRetryable(ctx context.Context, input *DescribeScraperInp return true, nil } +// DescribeScraperAPIClient is a client that implements the DescribeScraper +// operation. 
+type DescribeScraperAPIClient interface { + DescribeScraper(context.Context, *DescribeScraperInput, ...func(*Options)) (*DescribeScraperOutput, error) +} + +var _ DescribeScraperAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opDescribeScraper(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DescribeWorkspace.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DescribeWorkspace.go index c42fcd65567..a0ba496dbab 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DescribeWorkspace.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_DescribeWorkspace.go @@ -12,7 +12,7 @@ import ( smithytime "github.com/aws/smithy-go/time" smithyhttp "github.com/aws/smithy-go/transport/http" smithywaiter "github.com/aws/smithy-go/waiter" - "github.com/jmespath/go-jmespath" + jmespath "github.com/jmespath/go-jmespath" "time" ) @@ -112,6 +112,12 @@ func (c *Client) addOperationDescribeWorkspaceMiddlewares(stack *middleware.Stac if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDescribeWorkspaceValidationMiddleware(stack); err != nil { return err } @@ -136,14 +142,6 @@ func (c *Client) addOperationDescribeWorkspaceMiddlewares(stack *middleware.Stac return nil } -// DescribeWorkspaceAPIClient is a client that implements the DescribeWorkspace -// operation. -type DescribeWorkspaceAPIClient interface { - DescribeWorkspace(context.Context, *DescribeWorkspaceInput, ...func(*Options)) (*DescribeWorkspaceOutput, error) -} - -var _ DescribeWorkspaceAPIClient = (*Client)(nil) - // WorkspaceActiveWaiterOptions are waiter options for WorkspaceActiveWaiter type WorkspaceActiveWaiterOptions struct { @@ -176,12 +174,13 @@ type WorkspaceActiveWaiterOptions struct { // Retryable is function that can be used to override the service defined // waiter-behavior based on operation output, or returned error. This function is - // used by the waiter to decide if a state is retryable or a terminal state. By - // default service-modeled logic will populate this option. This option can thus be - // used to define a custom waiter state with fall-back to service-modeled waiter - // state mutators.The function returns an error in case of a failure state. In case - // of retry state, this function returns a bool value of true and nil error, while - // in case of success it returns a bool value of false and nil error. + // used by the waiter to decide if a state is retryable or a terminal state. + // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. + // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. 
Retryable func(context.Context, *DescribeWorkspaceInput, *DescribeWorkspaceOutput, error) (bool, error) } @@ -258,7 +257,13 @@ func (w *WorkspaceActiveWaiter) WaitForOutput(ctx context.Context, params *Descr } out, err := w.client.DescribeWorkspace(ctx, params, func(o *Options) { + baseOpts := []func(*Options){ + addIsWaiterUserAgent, + } o.APIOptions = append(o.APIOptions, apiOptions...) + for _, opt := range baseOpts { + opt(o) + } for _, opt := range options.ClientOptions { opt(o) } @@ -382,12 +387,13 @@ type WorkspaceDeletedWaiterOptions struct { // Retryable is function that can be used to override the service defined // waiter-behavior based on operation output, or returned error. This function is - // used by the waiter to decide if a state is retryable or a terminal state. By - // default service-modeled logic will populate this option. This option can thus be - // used to define a custom waiter state with fall-back to service-modeled waiter - // state mutators.The function returns an error in case of a failure state. In case - // of retry state, this function returns a bool value of true and nil error, while - // in case of success it returns a bool value of false and nil error. + // used by the waiter to decide if a state is retryable or a terminal state. + // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. + // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. Retryable func(context.Context, *DescribeWorkspaceInput, *DescribeWorkspaceOutput, error) (bool, error) } @@ -464,7 +470,13 @@ func (w *WorkspaceDeletedWaiter) WaitForOutput(ctx context.Context, params *Desc } out, err := w.client.DescribeWorkspace(ctx, params, func(o *Options) { + baseOpts := []func(*Options){ + addIsWaiterUserAgent, + } o.APIOptions = append(o.APIOptions, apiOptions...) + for _, opt := range baseOpts { + opt(o) + } for _, opt := range options.ClientOptions { opt(o) } @@ -529,6 +541,14 @@ func workspaceDeletedStateRetryable(ctx context.Context, input *DescribeWorkspac return true, nil } +// DescribeWorkspaceAPIClient is a client that implements the DescribeWorkspace +// operation. +type DescribeWorkspaceAPIClient interface { + DescribeWorkspace(context.Context, *DescribeWorkspaceInput, ...func(*Options)) (*DescribeWorkspaceOutput, error) +} + +var _ DescribeWorkspaceAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opDescribeWorkspace(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_GetDefaultScraperConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_GetDefaultScraperConfiguration.go index 88127288c71..150d562f624 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_GetDefaultScraperConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_GetDefaultScraperConfiguration.go @@ -35,9 +35,10 @@ type GetDefaultScraperConfigurationInput struct { // Represents the output of a GetDefaultScraperConfiguration operation. type GetDefaultScraperConfigurationOutput struct { - // The configuration file. Base 64 encoded. 
For more information, see Scraper - // configuration (https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-collector-how-to.html#AMP-collector-configuration) - // in the Amazon Managed Service for Prometheus User Guide. + // The configuration file. Base 64 encoded. For more information, see [Scraper configuration]in the + // Amazon Managed Service for Prometheus User Guide. + // + // [Scraper configuration]: https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-collector-how-to.html#AMP-collector-configuration // // This member is required. Configuration []byte @@ -103,6 +104,12 @@ func (c *Client) addOperationGetDefaultScraperConfigurationMiddlewares(stack *mi if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetDefaultScraperConfiguration(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_ListRuleGroupsNamespaces.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_ListRuleGroupsNamespaces.go index a79f71b4a14..d3f4ebb6448 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_ListRuleGroupsNamespaces.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_ListRuleGroupsNamespaces.go @@ -45,10 +45,12 @@ type ListRuleGroupsNamespacesInput struct { // The token for the next set of items to return. You receive this token from a // previous call, and use it to get the next page of results. The other parameters - // must be the same as the initial call. For example, if your initial request has - // maxResults of 10, and there are 12 rule groups namespaces to return, then your - // initial request will return 10 and a nextToken . Using the next token in a - // subsequent call will return the remaining 2 namespaces. + // must be the same as the initial call. + // + // For example, if your initial request has maxResults of 10, and there are 12 + // rule groups namespaces to return, then your initial request will return 10 and a + // nextToken . Using the next token in a subsequent call will return the remaining + // 2 namespaces. NextToken *string noSmithyDocumentSerde @@ -128,6 +130,12 @@ func (c *Client) addOperationListRuleGroupsNamespacesMiddlewares(stack *middlewa if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpListRuleGroupsNamespacesValidationMiddleware(stack); err != nil { return err } @@ -152,14 +160,6 @@ func (c *Client) addOperationListRuleGroupsNamespacesMiddlewares(stack *middlewa return nil } -// ListRuleGroupsNamespacesAPIClient is a client that implements the -// ListRuleGroupsNamespaces operation. 
-type ListRuleGroupsNamespacesAPIClient interface { - ListRuleGroupsNamespaces(context.Context, *ListRuleGroupsNamespacesInput, ...func(*Options)) (*ListRuleGroupsNamespacesOutput, error) -} - -var _ ListRuleGroupsNamespacesAPIClient = (*Client)(nil) - // ListRuleGroupsNamespacesPaginatorOptions is the paginator options for // ListRuleGroupsNamespaces type ListRuleGroupsNamespacesPaginatorOptions struct { @@ -225,6 +225,9 @@ func (p *ListRuleGroupsNamespacesPaginator) NextPage(ctx context.Context, optFns } params.MaxResults = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListRuleGroupsNamespaces(ctx, &params, optFns...) if err != nil { return nil, err @@ -244,6 +247,14 @@ func (p *ListRuleGroupsNamespacesPaginator) NextPage(ctx context.Context, optFns return result, nil } +// ListRuleGroupsNamespacesAPIClient is a client that implements the +// ListRuleGroupsNamespaces operation. +type ListRuleGroupsNamespacesAPIClient interface { + ListRuleGroupsNamespaces(context.Context, *ListRuleGroupsNamespacesInput, ...func(*Options)) (*ListRuleGroupsNamespacesOutput, error) +} + +var _ ListRuleGroupsNamespacesAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListRuleGroupsNamespaces(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_ListScrapers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_ListScrapers.go index 1f43040dadc..f677f05d647 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_ListScrapers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_ListScrapers.go @@ -33,21 +33,27 @@ func (c *Client) ListScrapers(ctx context.Context, params *ListScrapersInput, op type ListScrapersInput struct { // (Optional) A list of key-value pairs to filter the list of scrapers returned. - // Keys include status , sourceArn , destinationArn , and alias . Filters on the - // same key are OR 'd together, and filters on different keys are AND 'd together. - // For example, status=ACTIVE&status=CREATING&alias=Test , will return all scrapers - // that have the alias Test, and are either in status ACTIVE or CREATING. To find - // all active scrapers that are sending metrics to a specific Amazon Managed - // Service for Prometheus workspace, you would use the ARN of the workspace in a - // query: - // status=ACTIVE&destinationArn=arn:aws:aps:us-east-1:123456789012:workspace/ws-example1-1234-abcd-56ef-123456789012 + // Keys include status , sourceArn , destinationArn , and alias . + // + // Filters on the same key are OR 'd together, and filters on different keys are + // AND 'd together. For example, status=ACTIVE&status=CREATING&alias=Test , will + // return all scrapers that have the alias Test, and are either in status ACTIVE or + // CREATING. + // + // To find all active scrapers that are sending metrics to a specific Amazon + // Managed Service for Prometheus workspace, you would use the ARN of the workspace + // in a query: + // + // status=ACTIVE&destinationArn=arn:aws:aps:us-east-1:123456789012:workspace/ws-example1-1234-abcd-56ef-123456789012 + // // If this is included, it filters the results to only the scrapers that match the // filter. Filters map[string][]string // Optional) The maximum number of scrapers to return in one ListScrapers - // operation.
The range is 1-1000. + + // If you omit this parameter, the default of 100 is used. MaxResults *int32 // (Optional) The token for the next set of items to return. (You received this @@ -131,6 +137,12 @@ func (c *Client) addOperationListScrapersMiddlewares(stack *middleware.Stack, op if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListScrapers(options.Region), middleware.Before); err != nil { return err } @@ -152,18 +164,12 @@ func (c *Client) addOperationListScrapersMiddlewares(stack *middleware.Stack, op return nil } -// ListScrapersAPIClient is a client that implements the ListScrapers operation. -type ListScrapersAPIClient interface { - ListScrapers(context.Context, *ListScrapersInput, ...func(*Options)) (*ListScrapersOutput, error) -} - -var _ ListScrapersAPIClient = (*Client)(nil) - // ListScrapersPaginatorOptions is the paginator options for ListScrapers type ListScrapersPaginatorOptions struct { // Optional) The maximum number of scrapers to return in one ListScrapers - // operation. The range is 1-1000. If you omit this parameter, the default of 100 - // is used. + // operation. The range is 1-1000. + // + // If you omit this parameter, the default of 100 is used. Limit int32 // Set to true if pagination should stop if the service returns a pagination token @@ -224,6 +230,9 @@ func (p *ListScrapersPaginator) NextPage(ctx context.Context, optFns ...func(*Op } params.MaxResults = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListScrapers(ctx, &params, optFns...) if err != nil { return nil, err @@ -243,6 +252,13 @@ func (p *ListScrapersPaginator) NextPage(ctx context.Context, optFns ...func(*Op return result, nil } +// ListScrapersAPIClient is a client that implements the ListScrapers operation.
+type ListScrapersAPIClient interface { + ListScrapers(context.Context, *ListScrapersInput, ...func(*Options)) (*ListScrapersOutput, error) +} + +var _ ListScrapersAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListScrapers(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_ListTagsForResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_ListTagsForResource.go index feb51b378cc..673056ae8e1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_ListTagsForResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_ListTagsForResource.go @@ -105,6 +105,12 @@ func (c *Client) addOperationListTagsForResourceMiddlewares(stack *middleware.St if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpListTagsForResourceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_ListWorkspaces.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_ListWorkspaces.go index cd0d3b54ac7..ada1f28f093 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_ListWorkspaces.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_ListWorkspaces.go @@ -32,9 +32,10 @@ func (c *Client) ListWorkspaces(ctx context.Context, params *ListWorkspacesInput type ListWorkspacesInput struct { // If this is included, it filters the results to only the workspaces with names - // that start with the value that you specify here. Amazon Managed Service for - // Prometheus will automatically strip any blank spaces from the beginning and end - // of the alias that you specify. + // that start with the value that you specify here. + // + // Amazon Managed Service for Prometheus will automatically strip any blank spaces + // from the beginning and end of the alias that you specify. Alias *string // The maximum number of workspaces to return per request. The default is 100. @@ -42,10 +43,12 @@ type ListWorkspacesInput struct { // The token for the next set of items to return. You receive this token from a // previous call, and use it to get the next page of results. The other parameters - // must be the same as the initial call. For example, if your initial request has - // maxResults of 10, and there are 12 workspaces to return, then your initial - // request will return 10 and a nextToken . Using the next token in a subsequent - // call will return the remaining 2 workspaces. + // must be the same as the initial call. + // + // For example, if your initial request has maxResults of 10, and there are 12 + // workspaces to return, then your initial request will return 10 and a nextToken . + // Using the next token in a subsequent call will return the remaining 2 + // workspaces. 
NextToken *string noSmithyDocumentSerde @@ -125,6 +128,12 @@ func (c *Client) addOperationListWorkspacesMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListWorkspaces(options.Region), middleware.Before); err != nil { return err } @@ -146,14 +155,6 @@ func (c *Client) addOperationListWorkspacesMiddlewares(stack *middleware.Stack, return nil } -// ListWorkspacesAPIClient is a client that implements the ListWorkspaces -// operation. -type ListWorkspacesAPIClient interface { - ListWorkspaces(context.Context, *ListWorkspacesInput, ...func(*Options)) (*ListWorkspacesOutput, error) -} - -var _ ListWorkspacesAPIClient = (*Client)(nil) - // ListWorkspacesPaginatorOptions is the paginator options for ListWorkspaces type ListWorkspacesPaginatorOptions struct { // The maximum number of workspaces to return per request. The default is 100. @@ -217,6 +218,9 @@ func (p *ListWorkspacesPaginator) NextPage(ctx context.Context, optFns ...func(* } params.MaxResults = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListWorkspaces(ctx, &params, optFns...) if err != nil { return nil, err @@ -236,6 +240,14 @@ func (p *ListWorkspacesPaginator) NextPage(ctx context.Context, optFns ...func(* return result, nil } +// ListWorkspacesAPIClient is a client that implements the ListWorkspaces +// operation. +type ListWorkspacesAPIClient interface { + ListWorkspaces(context.Context, *ListWorkspacesInput, ...func(*Options)) (*ListWorkspacesOutput, error) +} + +var _ ListWorkspacesAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListWorkspaces(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_PutAlertManagerDefinition.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_PutAlertManagerDefinition.go index 1311cf7cdc0..e659ca5338a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_PutAlertManagerDefinition.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_PutAlertManagerDefinition.go @@ -33,9 +33,11 @@ func (c *Client) PutAlertManagerDefinition(ctx context.Context, params *PutAlert type PutAlertManagerDefinitionInput struct { // The alert manager definition to use. A base64-encoded version of the YAML alert - // manager definition file. For details about the alert manager definition, see - // AlertManagedDefinitionData (https://docs.aws.amazon.com/prometheus/latest/APIReference/yaml-AlertManagerDefinitionData.html) - // . + // manager definition file. + // + // For details about the alert manager definition, see [AlertManagedDefinitionData]. + // + // [AlertManagedDefinitionData]: https://docs.aws.amazon.com/prometheus/latest/APIReference/yaml-AlertManagerDefinitionData.html // // This member is required.
Data []byte @@ -121,6 +123,12 @@ func (c *Client) addOperationPutAlertManagerDefinitionMiddlewares(stack *middlew if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opPutAlertManagerDefinitionMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_PutRuleGroupsNamespace.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_PutRuleGroupsNamespace.go index 8480d8c9570..32bc9667f80 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_PutRuleGroupsNamespace.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_PutRuleGroupsNamespace.go @@ -13,10 +13,13 @@ import ( // Updates an existing rule groups namespace within a workspace. A rule groups // namespace is associated with exactly one rules file. A workspace can have -// multiple rule groups namespaces. Use this operation only to update existing rule -// groups namespaces. To create a new rule groups namespace, use -// CreateRuleGroupsNamespace . You can't use this operation to add tags to an -// existing rule groups namespace. Instead, use TagResource . +// multiple rule groups namespaces. +// +// Use this operation only to update existing rule groups namespaces. To create a +// new rule groups namespace, use CreateRuleGroupsNamespace . +// +// You can't use this operation to add tags to an existing rule groups namespace. +// Instead, use TagResource . func (c *Client) PutRuleGroupsNamespace(ctx context.Context, params *PutRuleGroupsNamespaceInput, optFns ...func(*Options)) (*PutRuleGroupsNamespaceOutput, error) { if params == nil { params = &PutRuleGroupsNamespaceInput{} @@ -36,9 +39,11 @@ func (c *Client) PutRuleGroupsNamespace(ctx context.Context, params *PutRuleGrou type PutRuleGroupsNamespaceInput struct { // The new rules file to use in the namespace. A base64-encoded version of the - // YAML rule groups file. For details about the rule groups namespace structure, - // see RuleGroupsNamespaceData (https://docs.aws.amazon.com/prometheus/latest/APIReference/yaml-RuleGroupsNamespaceData.html) - // . + // YAML rule groups file. + // + // For details about the rule groups namespace structure, see [RuleGroupsNamespaceData]. + // + // [RuleGroupsNamespaceData]: https://docs.aws.amazon.com/prometheus/latest/APIReference/yaml-RuleGroupsNamespaceData.html // // This member is required. Data []byte @@ -142,6 +147,12 @@ func (c *Client) addOperationPutRuleGroupsNamespaceMiddlewares(stack *middleware if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opPutRuleGroupsNamespaceMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_TagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_TagResource.go index 312f7531cc6..69f3969345e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_TagResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_TagResource.go @@ -12,10 +12,12 @@ import ( // The TagResource operation associates tags with an Amazon Managed Service for // Prometheus resource. 
The only resources that can be tagged are workspaces and -// rule groups namespaces. If you specify a new tag key for the resource, this tag -// is appended to the list of tags associated with the resource. If you specify a -// tag key that is already associated with the resource, the new tag value that you -// specify replaces the previous value for that tag. +// rule groups namespaces. +// +// If you specify a new tag key for the resource, this tag is appended to the list +// of tags associated with the resource. If you specify a tag key that is already +// associated with the resource, the new tag value that you specify replaces the +// previous value for that tag. func (c *Client) TagResource(ctx context.Context, params *TagResourceInput, optFns ...func(*Options)) (*TagResourceOutput, error) { if params == nil { params = &TagResourceInput{} @@ -38,8 +40,9 @@ type TagResourceInput struct { // This member is required. ResourceArn *string - // The list of tag keys and values to associate with the resource. Keys may not - // begin with aws: . + // The list of tag keys and values to associate with the resource. + // + // Keys may not begin with aws: . // // This member is required. Tags map[string]string @@ -109,6 +112,12 @@ func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, opt if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpTagResourceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_UntagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_UntagResource.go index c9f52f5a2c4..d691f4d112a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_UntagResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_UntagResource.go @@ -105,6 +105,12 @@ func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, o if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpUntagResourceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_UpdateLoggingConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_UpdateLoggingConfiguration.go index 9ef8f73f0ec..c7d2e32db85 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_UpdateLoggingConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_UpdateLoggingConfiguration.go @@ -118,6 +118,12 @@ func (c *Client) addOperationUpdateLoggingConfigurationMiddlewares(stack *middle if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opUpdateLoggingConfigurationMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_UpdateWorkspaceAlias.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_UpdateWorkspaceAlias.go index d6b11a31563..5e03684d7b1 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_UpdateWorkspaceAlias.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/api_op_UpdateWorkspaceAlias.go @@ -34,9 +34,10 @@ type UpdateWorkspaceAliasInput struct { // This member is required. WorkspaceId *string - // The new alias for the workspace. It does not need to be unique. Amazon Managed - // Service for Prometheus will automatically strip any blank spaces from the - // beginning and end of the alias that you specify. + // The new alias for the workspace. It does not need to be unique. + // + // Amazon Managed Service for Prometheus will automatically strip any blank spaces + // from the beginning and end of the alias that you specify. Alias *string // A unique identifier that you can provide to ensure the idempotency of the @@ -108,6 +109,12 @@ func (c *Client) addOperationUpdateWorkspaceAliasMiddlewares(stack *middleware.S if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opUpdateWorkspaceAliasMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/auth.go index 8e90d81a4bd..98bfd5e06bc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/auth.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/auth.go @@ -12,7 +12,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -func bindAuthParamsRegion(params *AuthResolverParameters, _ interface{}, options Options) { +func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { params.Region = options.Region } @@ -90,12 +90,12 @@ type AuthResolverParameters struct { Region string } -func bindAuthResolverParams(operation string, input interface{}, options Options) *AuthResolverParameters { +func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { params := &AuthResolverParameters{ Operation: operation, } - bindAuthParamsRegion(params, input, options) + bindAuthParamsRegion(ctx, params, input, options) return params } @@ -145,7 +145,7 @@ func (*resolveAuthSchemeMiddleware) ID() string { func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - params := bindAuthResolverParams(m.operation, getOperationInput(ctx), m.options) + params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) if err != nil { return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/deserializers.go index 013411c1afa..e2152ea93bb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/deserializers.go @@ -20,8 +20,17 @@ import ( "io/ioutil" "strconv" "strings" + "time" ) +func deserializeS3Expires(v string) (*time.Time, error) { + t, err := smithytime.ParseHTTPDate(v) + if err != nil { + return nil, nil + } + return &t, nil +} + type 
awsRestjson1_deserializeOpCreateAlertManagerDefinition struct { } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/doc.go index 02f301f2a19..178abe44421 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/doc.go @@ -9,13 +9,19 @@ // Prometheus, you can use the same open-source Prometheus data model and query // language that you use today to monitor the performance of your containerized // workloads, and also enjoy improved scalability, availability, and security -// without having to manage the underlying infrastructure. For more information -// about Amazon Managed Service for Prometheus, see the Amazon Managed Service for -// Prometheus (https://docs.aws.amazon.com/prometheus/latest/userguide/what-is-Amazon-Managed-Service-Prometheus.html) -// User Guide. Amazon Managed Service for Prometheus includes two APIs. +// without having to manage the underlying infrastructure. +// +// For more information about Amazon Managed Service for Prometheus, see the [Amazon Managed Service for Prometheus] User +// Guide. +// +// Amazon Managed Service for Prometheus includes two APIs. +// // - Use the Amazon Web Services API described in this guide to manage Amazon // Managed Service for Prometheus resources, such as workspaces, rule groups, and // alert managers. -// - Use the Prometheus-compatible API (https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-APIReference.html#AMP-APIReference-Prometheus-Compatible-Apis) -// to work within your Prometheus workspace. +// +// - Use the [Prometheus-compatible API]to work within your Prometheus workspace. +// +// [Amazon Managed Service for Prometheus]: https://docs.aws.amazon.com/prometheus/latest/userguide/what-is-Amazon-Managed-Service-Prometheus.html +// [Prometheus-compatible API]: https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-APIReference.html#AMP-APIReference-Prometheus-Compatible-Apis package amp diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/endpoints.go index 8363954b9f4..8ebd7284452 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/endpoints.go @@ -288,6 +288,17 @@ func (p EndpointParameters) WithDefaults() EndpointParameters { return p } +type stringSlice []string + +func (s stringSlice) Get(i int) *string { + if i < 0 || i >= len(s) { + return nil + } + + v := s[i] + return &v +} + // EndpointResolverV2 provides the interface for resolving service endpoints. 
type EndpointResolverV2 interface { // ResolveEndpoint attempts to resolve the endpoint with the provided options, @@ -446,7 +457,7 @@ type endpointParamsBinder interface { bindEndpointParams(*EndpointParameters) } -func bindEndpointParams(input interface{}, options Options) *EndpointParameters { +func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { params := &EndpointParameters{} params.Region = bindRegion(options.Region) @@ -476,6 +487,10 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return next.HandleFinalize(ctx, in) } + if err := checkAccountID(getIdentity(ctx), m.options.AccountIDEndpointMode); err != nil { + return out, metadata, fmt.Errorf("invalid accountID set: %w", err) + } + req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) @@ -485,7 +500,7 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") } - params := bindEndpointParams(getOperationInput(ctx), m.options) + params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/go_module_metadata.go index 135aa580e23..6e4f16cb625 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/go_module_metadata.go @@ -3,4 +3,4 @@ package amp // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.25.4" +const goModuleVersion = "1.27.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/options.go index 829dbabaa8f..b8abe73567e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/options.go @@ -24,6 +24,9 @@ type Options struct { // modify this list for per operation behavior. APIOptions []func(*middleware.Stack) error + // Indicates how aws account ID is applied in endpoint2.0 routing + AccountIDEndpointMode aws.AccountIDEndpointMode + // The optional application specific identifier appended to the User-Agent header. AppID string @@ -50,8 +53,10 @@ type Options struct { // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a // value for this field will likely prevent you from using any endpoint-related // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom - // endpoint, set the client option BaseEndpoint instead. + // BaseEndpoint. + // + // To migrate an EndpointResolver implementation that uses a custom endpoint, set + // the client option BaseEndpoint instead. EndpointResolver EndpointResolver // Resolves the endpoint used for a particular service operation. This should be @@ -74,17 +79,20 @@ type Options struct { // RetryMaxAttempts specifies the maximum number attempts an API client will call // an operation that fails with a retryable error. 
A value of 0 is ignored, and // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. If specified in an operation call's - // functional options with a value that is different than the constructed client's - // Options, the Client's Retryer will be wrapped to use the operation's specific - // RetryMaxAttempts value. + // per operation call's retry max attempts. + // + // If specified in an operation call's functional options with a value that is + // different than the constructed client's Options, the Client's Retryer will be + // wrapped to use the operation's specific RetryMaxAttempts value. RetryMaxAttempts int // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. Currently does not support per operation call - // overrides, may in the future. + // Retryer option is not also specified. + // + // When creating a new API Clients this member will only be used if the Retryer + // Options member is nil. This value will be ignored if Retryer is not nil. + // + // Currently does not support per operation call overrides, may in the future. RetryMode aws.RetryMode // Retryer guides how HTTP requests should be retried in case of recoverable @@ -101,8 +109,9 @@ type Options struct { // The initial DefaultsMode used when the client options were constructed. If the // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. Currently does not support per operation call - // overrides, may in the future. + // value was at that point in time. + // + // Currently does not support per operation call overrides, may in the future. resolvedDefaultsMode aws.DefaultsMode // The HTTP client to invoke API calls with. Defaults to client's default HTTP @@ -147,6 +156,7 @@ func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { // Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for // this field will likely prevent you from using any endpoint-related service // features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// // To migrate an EndpointResolver implementation that uses a custom endpoint, set // the client option BaseEndpoint instead. func WithEndpointResolver(v EndpointResolver) func(*Options) { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/types/enums.go index 8bf4b70e3fa..5f3407ab9d3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/types/enums.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/types/enums.go @@ -26,8 +26,9 @@ const ( // Values returns all known values for AlertManagerDefinitionStatusCode. Note that // this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (AlertManagerDefinitionStatusCode) Values() []AlertManagerDefinitionStatusCode { return []AlertManagerDefinitionStatusCode{ "CREATING", @@ -63,8 +64,9 @@ const ( // Values returns all known values for LoggingConfigurationStatusCode. 
Note that // this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (LoggingConfigurationStatusCode) Values() []LoggingConfigurationStatusCode { return []LoggingConfigurationStatusCode{ "CREATING", @@ -100,8 +102,9 @@ const ( // Values returns all known values for RuleGroupsNamespaceStatusCode. Note that // this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (RuleGroupsNamespaceStatusCode) Values() []RuleGroupsNamespaceStatusCode { return []RuleGroupsNamespaceStatusCode{ "CREATING", @@ -130,8 +133,9 @@ const ( ) // Values returns all known values for ScraperStatusCode. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ScraperStatusCode) Values() []ScraperStatusCode { return []ScraperStatusCode{ "CREATING", @@ -154,6 +158,7 @@ const ( // Values returns all known values for ValidationExceptionReason. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (ValidationExceptionReason) Values() []ValidationExceptionReason { return []ValidationExceptionReason{ @@ -182,8 +187,9 @@ const ( ) // Values returns all known values for WorkspaceStatusCode. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (WorkspaceStatusCode) Values() []WorkspaceStatusCode { return []WorkspaceStatusCode{ "CREATING", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/types/types.go index 97bad32c5e8..4df5592a855 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/amp/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/amp/types/types.go @@ -15,9 +15,11 @@ type AlertManagerDefinitionDescription struct { // This member is required. CreatedAt *time.Time - // The actual alert manager definition. For details about the alert manager - // definition, see AlertManagedDefinitionData (https://docs.aws.amazon.com/prometheus/latest/APIReference/yaml-AlertManagerDefinitionData.html) - // . + // The actual alert manager definition. + // + // For details about the alert manager definition, see [AlertManagedDefinitionData]. + // + // [AlertManagedDefinitionData]: https://docs.aws.amazon.com/prometheus/latest/APIReference/yaml-AlertManagerDefinitionData.html // // This member is required. Data []byte @@ -158,9 +160,11 @@ type RuleGroupsNamespaceDescription struct { // This member is required. CreatedAt *time.Time - // The rule groups file used in the namespace. 
For details about the rule groups - // namespace structure, see RuleGroupsNamespaceData (https://docs.aws.amazon.com/prometheus/latest/APIReference/yaml-RuleGroupsNamespaceData.html) - // . + // The rule groups file used in the namespace. + // + // For details about the rule groups namespace structure, see [RuleGroupsNamespaceData]. + // + // [RuleGroupsNamespaceData]: https://docs.aws.amazon.com/prometheus/latest/APIReference/yaml-RuleGroupsNamespaceData.html // // This member is required. Data []byte @@ -238,8 +242,7 @@ type RuleGroupsNamespaceSummary struct { } // A scrape configuration for a scraper, base 64 encoded. For more information, -// see Scraper configuration in the Amazon Managed Service for Prometheus User -// Guide. +// see Scraper configurationin the Amazon Managed Service for Prometheus User Guide. // // The following types satisfy this interface: // diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/CHANGELOG.md index a9bf6fe6edb..fcad8a220c2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/CHANGELOG.md @@ -1,3 +1,57 @@ +# v1.40.3 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.40.2 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.40.1 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.40.0 (2024-06-26) + +* **Feature**: Support list-of-string endpoint parameter. + +# v1.39.1 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.39.0 (2024-06-18) + +* **Feature**: Track usage of various AWS SDK features in user-agent string. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.38.7 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.38.6 (2024-06-07) + +* **Bug Fix**: Add clock skew correction on all service clients +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.38.5 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.38.4 (2024-05-23) + +* No change notes available for this release. + +# v1.38.3 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.38.2 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.38.1 (2024-05-08) + +* **Bug Fix**: GoDoc improvement + # v1.38.0 (2024-04-11) * **Feature**: This release adds support for Metric Characteristics for CloudWatch Anomaly Detection. Anomaly Detector now takes Metric Characteristics object with Periodic Spikes boolean field that tells Anomaly Detection that spikes that repeat at the same time every week are part of the expected pattern. 
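Illustrative note (not part of the patch): the amp options.go hunk above adds an AccountIDEndpointMode client option, and the cloudwatch api_client.go hunk that follows wires it through NewFromConfig (AccountIDEndpointMode: cfg.AccountIDEndpointMode) and enforces it with the new checkAccountID finalize-step check. A minimal sketch of how a consumer of the updated SDK might set this option per client is shown below; the client constructor, option field, and mode constants are taken from the hunks in this diff, everything else (package layout, config loading) is an assumption for illustration only.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/cloudwatch"
)

func main() {
	// cfg.AccountIDEndpointMode reflects the shared config/environment, if set;
	// NewFromConfig copies it into the client options (see the hunk below).
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatalf("load config: %v", err)
	}

	// Per-client override: opt this client out of account-ID based endpoint
	// routing. With AccountIDEndpointModeRequired instead, the checkAccountID
	// check added in api_client.go would reject requests whose resolved
	// credentials carry no account ID.
	client := cloudwatch.NewFromConfig(cfg, func(o *cloudwatch.Options) {
		o.AccountIDEndpointMode = aws.AccountIDEndpointModeDisabled
	})
	_ = client
}

As the checkAccountID switch in the next hunk shows, Unset/Preferred/Disabled pass through, Required fails with "accountID is required but not set" when the credentials adapter has no account ID, and any other value is reported as an invalid mode.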
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_client.go index c2d20f8e228..22c8cd44804 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_client.go @@ -14,13 +14,16 @@ import ( internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" smithydocument "github.com/aws/smithy-go/document" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" "net" "net/http" + "sync/atomic" "time" ) @@ -30,6 +33,9 @@ const ServiceAPIVersion = "2010-08-01" // Client provides the API client to make operations call for Amazon CloudWatch. type Client struct { options Options + + // Difference between the time reported by the server and the client + timeOffset *atomic.Int64 } // New returns an initialized Client based on the functional options. Provide @@ -68,6 +74,8 @@ func New(options Options, optFns ...func(*Options)) *Client { options: options, } + initializeTimeOffsetResolver(client) + return client } @@ -240,6 +248,7 @@ func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { AppID: cfg.AppID, DisableRequestCompression: cfg.DisableRequestCompression, RequestMinCompressSizeBytes: cfg.RequestMinCompressSizeBytes, + AccountIDEndpointMode: cfg.AccountIDEndpointMode, } resolveAWSRetryerProvider(cfg, &opts) resolveAWSRetryMaxAttempts(cfg, &opts) @@ -443,6 +452,30 @@ func addContentSHA256Header(stack *middleware.Stack) error { return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) } +func addIsWaiterUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) + return nil + }) +} + +func addIsPaginatorUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) + return nil + }) +} + func addRetry(stack *middleware.Stack, o Options) error { attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { m.LogAttempts = o.ClientLogMode.IsRetries() @@ -486,6 +519,75 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { return nil } +func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { + if mode == aws.AccountIDEndpointModeDisabled { + return nil + } + + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { + return aws.String(ca.Credentials.AccountID) + } + + return nil +} + +func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { + mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} + if err := stack.Build.Add(&mw, middleware.After); err != nil { + return err + } + return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) +} +func 
initializeTimeOffsetResolver(c *Client) { + c.timeOffset = new(atomic.Int64) +} + +func checkAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) error { + switch mode { + case aws.AccountIDEndpointModeUnset: + case aws.AccountIDEndpointModePreferred: + case aws.AccountIDEndpointModeDisabled: + case aws.AccountIDEndpointModeRequired: + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); !ok { + return fmt.Errorf("accountID is required but not set") + } else if ca.Credentials.AccountID == "" { + return fmt.Errorf("accountID is required but not set") + } + // default check in case invalid mode is configured through request config + default: + return fmt.Errorf("invalid accountID endpoint mode %s, must be preferred/required/disabled", mode) + } + + return nil +} + +func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + switch options.Retryer.(type) { + case *retry.Standard: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) + case *retry.AdaptiveMode: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) + } + return nil +} + +func addIsRequestCompressionUserAgent(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + if !options.DisableRequestCompression { + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureGZIPRequestCompression) + } + return nil +} + func addRecursionDetection(stack *middleware.Stack) error { return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DeleteAlarms.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DeleteAlarms.go index d1f49051f27..cb6395bf94a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DeleteAlarms.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DeleteAlarms.go @@ -13,20 +13,27 @@ import ( // Deletes the specified alarms. You can delete up to 100 alarms in one operation. // However, this total can include no more than one composite alarm. For example, // you could delete 99 metric alarms and one composite alarms with one operation, -// but you can't delete two composite alarms with one operation. If you specify an -// incorrect alarm name or make any other error in the operation, no alarms are -// deleted. To confirm that alarms were deleted successfully, you can use the -// DescribeAlarms (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_DescribeAlarms.html) -// operation after using DeleteAlarms . It is possible to create a loop or cycle of -// composite alarms, where composite alarm A depends on composite alarm B, and -// composite alarm B also depends on composite alarm A. In this scenario, you can't -// delete any composite alarm that is part of the cycle because there is always -// still a composite alarm that depends on that alarm that you want to delete. To -// get out of such a situation, you must break the cycle by changing the rule of -// one of the composite alarms in the cycle to remove a dependency that creates the -// cycle. The simplest change to make to break a cycle is to change the AlarmRule -// of one of the alarms to false . Additionally, the evaluation of composite alarms -// stops if CloudWatch detects a cycle in the evaluation path. 
+// but you can't delete two composite alarms with one operation. +// +// If you specify an incorrect alarm name or make any other error in the +// operation, no alarms are deleted. To confirm that alarms were deleted +// successfully, you can use the [DescribeAlarms]operation after using DeleteAlarms . +// +// It is possible to create a loop or cycle of composite alarms, where composite +// alarm A depends on composite alarm B, and composite alarm B also depends on +// composite alarm A. In this scenario, you can't delete any composite alarm that +// is part of the cycle because there is always still a composite alarm that +// depends on that alarm that you want to delete. +// +// To get out of such a situation, you must break the cycle by changing the rule +// of one of the composite alarms in the cycle to remove a dependency that creates +// the cycle. The simplest change to make to break a cycle is to change the +// AlarmRule of one of the alarms to false . +// +// Additionally, the evaluation of composite alarms stops if CloudWatch detects a +// cycle in the evaluation path. +// +// [DescribeAlarms]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_DescribeAlarms.html func (c *Client) DeleteAlarms(ctx context.Context, params *DeleteAlarmsInput, optFns ...func(*Options)) (*DeleteAlarmsOutput, error) { if params == nil { params = &DeleteAlarmsInput{} @@ -114,6 +121,12 @@ func (c *Client) addOperationDeleteAlarmsMiddlewares(stack *middleware.Stack, op if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDeleteAlarmsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DeleteAnomalyDetector.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DeleteAnomalyDetector.go index c4f85fa0dfa..6f07d607dcb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DeleteAnomalyDetector.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DeleteAnomalyDetector.go @@ -11,10 +11,12 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes the specified anomaly detection model from your account. For more -// information about how to delete an anomaly detection model, see Deleting an -// anomaly detection model (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Create_Anomaly_Detection_Alarm.html#Delete_Anomaly_Detection_Model) -// in the CloudWatch User Guide. +// Deletes the specified anomaly detection model from your account. For more +// +// information about how to delete an anomaly detection model, see [Deleting an anomaly detection model]in the +// CloudWatch User Guide. +// +// [Deleting an anomaly detection model]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Create_Anomaly_Detection_Alarm.html#Delete_Anomaly_Detection_Model func (c *Client) DeleteAnomalyDetector(ctx context.Context, params *DeleteAnomalyDetectorInput, optFns ...func(*Options)) (*DeleteAnomalyDetectorOutput, error) { if params == nil { params = &DeleteAnomalyDetectorInput{} @@ -37,14 +39,21 @@ type DeleteAnomalyDetectorInput struct { // Deprecated: Use SingleMetricAnomalyDetector. Dimensions []types.Dimension - // The metric math anomaly detector to be deleted. 
When using - // MetricMathAnomalyDetector , you cannot include following parameters in the same - // operation: + // The metric math anomaly detector to be deleted. + // + // When using MetricMathAnomalyDetector , you cannot include following parameters + // in the same operation: + // // - Dimensions , + // // - MetricName + // // - Namespace + // // - Stat + // // - the SingleMetricAnomalyDetector parameters of DeleteAnomalyDetectorInput + // // Instead, specify the metric math anomaly detector attributes as part of the // MetricMathAnomalyDetector property. MetricMathAnomalyDetector *types.MetricMathAnomalyDetector @@ -59,14 +68,21 @@ type DeleteAnomalyDetectorInput struct { // Deprecated: Use SingleMetricAnomalyDetector. Namespace *string - // A single metric anomaly detector to be deleted. When using - // SingleMetricAnomalyDetector , you cannot include the following parameters in the - // same operation: + // A single metric anomaly detector to be deleted. + // + // When using SingleMetricAnomalyDetector , you cannot include the following + // parameters in the same operation: + // // - Dimensions , + // // - MetricName + // // - Namespace + // // - Stat + // // - the MetricMathAnomalyDetector parameters of DeleteAnomalyDetectorInput + // // Instead, specify the single metric anomaly detector attributes as part of the // SingleMetricAnomalyDetector property. SingleMetricAnomalyDetector *types.SingleMetricAnomalyDetector @@ -141,6 +157,12 @@ func (c *Client) addOperationDeleteAnomalyDetectorMiddlewares(stack *middleware. if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDeleteAnomalyDetectorValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DeleteDashboards.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DeleteDashboards.go index b3b45768512..e0258c17547 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DeleteDashboards.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DeleteDashboards.go @@ -99,6 +99,12 @@ func (c *Client) addOperationDeleteDashboardsMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDeleteDashboardsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DeleteInsightRules.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DeleteInsightRules.go index 3ce1c6af29f..93418943768 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DeleteInsightRules.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DeleteInsightRules.go @@ -11,9 +11,10 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Permanently deletes the specified Contributor Insights rules. If you create a -// rule, delete it, and then re-create it with the same name, historical data from -// the first time the rule was created might not be available. +// Permanently deletes the specified Contributor Insights rules. 
+// +// If you create a rule, delete it, and then re-create it with the same name, +// historical data from the first time the rule was created might not be available. func (c *Client) DeleteInsightRules(ctx context.Context, params *DeleteInsightRulesInput, optFns ...func(*Options)) (*DeleteInsightRulesOutput, error) { if params == nil { params = &DeleteInsightRulesInput{} @@ -32,8 +33,9 @@ func (c *Client) DeleteInsightRules(ctx context.Context, params *DeleteInsightRu type DeleteInsightRulesInput struct { // An array of the rule names to delete. If you need to find out the names of your - // rules, use DescribeInsightRules (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_DescribeInsightRules.html) - // . + // rules, use [DescribeInsightRules]. + // + // [DescribeInsightRules]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_DescribeInsightRules.html // // This member is required. RuleNames []string @@ -108,6 +110,12 @@ func (c *Client) addOperationDeleteInsightRulesMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDeleteInsightRulesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DeleteMetricStream.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DeleteMetricStream.go index be895dd089d..e4cac13631c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DeleteMetricStream.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DeleteMetricStream.go @@ -98,6 +98,12 @@ func (c *Client) addOperationDeleteMetricStreamMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDeleteMetricStreamValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DescribeAlarmHistory.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DescribeAlarmHistory.go index 8e5b24fdbe1..00fa06d1ea3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DescribeAlarmHistory.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DescribeAlarmHistory.go @@ -14,11 +14,13 @@ import ( // Retrieves the history for the specified alarm. You can filter the results by // date range or item type. If an alarm name is not specified, the histories for -// either all metric alarms or all composite alarms are returned. CloudWatch -// retains the history of an alarm even if you delete the alarm. To use this -// operation and return information about a composite alarm, you must be signed on -// with the cloudwatch:DescribeAlarmHistory permission that is scoped to * . You -// can't return information about composite alarms if your +// either all metric alarms or all composite alarms are returned. +// +// CloudWatch retains the history of an alarm even if you delete the alarm. +// +// To use this operation and return information about a composite alarm, you must +// be signed on with the cloudwatch:DescribeAlarmHistory permission that is scoped +// to * . 
You can't return information about composite alarms if your // cloudwatch:DescribeAlarmHistory permission has a narrower scope. func (c *Client) DescribeAlarmHistory(ctx context.Context, params *DescribeAlarmHistoryInput, optFns ...func(*Options)) (*DescribeAlarmHistoryOutput, error) { if params == nil { @@ -138,6 +140,12 @@ func (c *Client) addOperationDescribeAlarmHistoryMiddlewares(stack *middleware.S if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeAlarmHistory(options.Region), middleware.Before); err != nil { return err } @@ -159,14 +167,6 @@ func (c *Client) addOperationDescribeAlarmHistoryMiddlewares(stack *middleware.S return nil } -// DescribeAlarmHistoryAPIClient is a client that implements the -// DescribeAlarmHistory operation. -type DescribeAlarmHistoryAPIClient interface { - DescribeAlarmHistory(context.Context, *DescribeAlarmHistoryInput, ...func(*Options)) (*DescribeAlarmHistoryOutput, error) -} - -var _ DescribeAlarmHistoryAPIClient = (*Client)(nil) - // DescribeAlarmHistoryPaginatorOptions is the paginator options for // DescribeAlarmHistory type DescribeAlarmHistoryPaginatorOptions struct { @@ -231,6 +231,9 @@ func (p *DescribeAlarmHistoryPaginator) NextPage(ctx context.Context, optFns ... } params.MaxRecords = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.DescribeAlarmHistory(ctx, ¶ms, optFns...) if err != nil { return nil, err @@ -250,6 +253,14 @@ func (p *DescribeAlarmHistoryPaginator) NextPage(ctx context.Context, optFns ... return result, nil } +// DescribeAlarmHistoryAPIClient is a client that implements the +// DescribeAlarmHistory operation. +type DescribeAlarmHistoryAPIClient interface { + DescribeAlarmHistory(context.Context, *DescribeAlarmHistoryInput, ...func(*Options)) (*DescribeAlarmHistoryOutput, error) +} + +var _ DescribeAlarmHistoryAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opDescribeAlarmHistory(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DescribeAlarms.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DescribeAlarms.go index cea6fcad8f1..73b8634627d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DescribeAlarms.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DescribeAlarms.go @@ -11,17 +11,18 @@ import ( smithytime "github.com/aws/smithy-go/time" smithyhttp "github.com/aws/smithy-go/transport/http" smithywaiter "github.com/aws/smithy-go/waiter" - "github.com/jmespath/go-jmespath" + jmespath "github.com/jmespath/go-jmespath" "strconv" "time" ) // Retrieves the specified alarms. You can filter the results by specifying a -// prefix for the alarm name, the alarm state, or a prefix for any action. To use -// this operation and return information about composite alarms, you must be signed -// on with the cloudwatch:DescribeAlarms permission that is scoped to * . You can't -// return information about composite alarms if your cloudwatch:DescribeAlarms -// permission has a narrower scope. +// prefix for the alarm name, the alarm state, or a prefix for any action. 
+// +// To use this operation and return information about composite alarms, you must +// be signed on with the cloudwatch:DescribeAlarms permission that is scoped to * . +// You can't return information about composite alarms if your +// cloudwatch:DescribeAlarms permission has a narrower scope. func (c *Client) DescribeAlarms(ctx context.Context, params *DescribeAlarmsInput, optFns ...func(*Options)) (*DescribeAlarmsOutput, error) { if params == nil { params = &DescribeAlarmsInput{} @@ -45,8 +46,9 @@ type DescribeAlarmsInput struct { ActionPrefix *string // An alarm name prefix. If you specify this parameter, you receive information - // about all alarms that have names that start with this prefix. If this parameter - // is specified, you cannot specify AlarmNames . + // about all alarms that have names that start with this prefix. + // + // If this parameter is specified, you cannot specify AlarmNames . AlarmNamePrefix *string // The names of the alarms to retrieve information about. @@ -54,11 +56,14 @@ type DescribeAlarmsInput struct { // Use this parameter to specify whether you want the operation to return metric // alarms or composite alarms. If you omit this parameter, only metric alarms are - // returned, even if composite alarms exist in the account. For example, if you - // omit this parameter or specify MetricAlarms , the operation returns only a list - // of metric alarms. It does not return any composite alarms, even if composite - // alarms exist in the account. If you specify CompositeAlarms , the operation - // returns only a list of composite alarms, and does not return any metric alarms. + // returned, even if composite alarms exist in the account. + // + // For example, if you omit this parameter or specify MetricAlarms , the operation + // returns only a list of metric alarms. It does not return any composite alarms, + // even if composite alarms exist in the account. + // + // If you specify CompositeAlarms , the operation returns only a list of composite + // alarms, and does not return any metric alarms. AlarmTypes []types.AlarmType // If you use this parameter and specify the name of a composite alarm, the @@ -66,13 +71,17 @@ type DescribeAlarmsInput struct { // specify. These are the metric alarms and composite alarms referenced in the // AlarmRule field of the composite alarm that you specify in ChildrenOfAlarmName . // Information about the composite alarm that you name in ChildrenOfAlarmName is - // not returned. If you specify ChildrenOfAlarmName , you cannot specify any other - // parameters in the request except for MaxRecords and NextToken . If you do so, - // you receive a validation error. Only the Alarm Name , ARN , StateValue - // (OK/ALARM/INSUFFICIENT_DATA), and StateUpdatedTimestamp information are - // returned by this operation when you use this parameter. To get complete - // information about these alarms, perform another DescribeAlarms operation and - // specify the parent alarm names in the AlarmNames parameter. + // not returned. + // + // If you specify ChildrenOfAlarmName , you cannot specify any other parameters in + // the request except for MaxRecords and NextToken . If you do so, you receive a + // validation error. + // + // Only the Alarm Name , ARN , StateValue (OK/ALARM/INSUFFICIENT_DATA), and + // StateUpdatedTimestamp information are returned by this operation when you use + // this parameter. 
To get complete information about these alarms, perform another + // DescribeAlarms operation and specify the parent alarm names in the AlarmNames + // parameter. ChildrenOfAlarmName *string // The maximum number of alarm descriptions to retrieve. @@ -86,11 +95,14 @@ type DescribeAlarmsInput struct { // the operation returns information about the "parent" alarms of the alarm you // specify. These are the composite alarms that have AlarmRule parameters that // reference the alarm named in ParentsOfAlarmName . Information about the alarm - // that you specify in ParentsOfAlarmName is not returned. If you specify - // ParentsOfAlarmName , you cannot specify any other parameters in the request - // except for MaxRecords and NextToken . If you do so, you receive a validation - // error. Only the Alarm Name and ARN are returned by this operation when you use - // this parameter. To get complete information about these alarms, perform another + // that you specify in ParentsOfAlarmName is not returned. + // + // If you specify ParentsOfAlarmName , you cannot specify any other parameters in + // the request except for MaxRecords and NextToken . If you do so, you receive a + // validation error. + // + // Only the Alarm Name and ARN are returned by this operation when you use this + // parameter. To get complete information about these alarms, perform another // DescribeAlarms operation and specify the parent alarm names in the AlarmNames // parameter. ParentsOfAlarmName *string @@ -174,6 +186,12 @@ func (c *Client) addOperationDescribeAlarmsMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeAlarms(options.Region), middleware.Before); err != nil { return err } @@ -195,96 +213,6 @@ func (c *Client) addOperationDescribeAlarmsMiddlewares(stack *middleware.Stack, return nil } -// DescribeAlarmsAPIClient is a client that implements the DescribeAlarms -// operation. -type DescribeAlarmsAPIClient interface { - DescribeAlarms(context.Context, *DescribeAlarmsInput, ...func(*Options)) (*DescribeAlarmsOutput, error) -} - -var _ DescribeAlarmsAPIClient = (*Client)(nil) - -// DescribeAlarmsPaginatorOptions is the paginator options for DescribeAlarms -type DescribeAlarmsPaginatorOptions struct { - // The maximum number of alarm descriptions to retrieve. - Limit int32 - - // Set to true if pagination should stop if the service returns a pagination token - // that matches the most recent token provided to the service. 
- StopOnDuplicateToken bool -} - -// DescribeAlarmsPaginator is a paginator for DescribeAlarms -type DescribeAlarmsPaginator struct { - options DescribeAlarmsPaginatorOptions - client DescribeAlarmsAPIClient - params *DescribeAlarmsInput - nextToken *string - firstPage bool -} - -// NewDescribeAlarmsPaginator returns a new DescribeAlarmsPaginator -func NewDescribeAlarmsPaginator(client DescribeAlarmsAPIClient, params *DescribeAlarmsInput, optFns ...func(*DescribeAlarmsPaginatorOptions)) *DescribeAlarmsPaginator { - if params == nil { - params = &DescribeAlarmsInput{} - } - - options := DescribeAlarmsPaginatorOptions{} - if params.MaxRecords != nil { - options.Limit = *params.MaxRecords - } - - for _, fn := range optFns { - fn(&options) - } - - return &DescribeAlarmsPaginator{ - options: options, - client: client, - params: params, - firstPage: true, - nextToken: params.NextToken, - } -} - -// HasMorePages returns a boolean indicating whether more pages are available -func (p *DescribeAlarmsPaginator) HasMorePages() bool { - return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) -} - -// NextPage retrieves the next DescribeAlarms page. -func (p *DescribeAlarmsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*DescribeAlarmsOutput, error) { - if !p.HasMorePages() { - return nil, fmt.Errorf("no more pages available") - } - - params := *p.params - params.NextToken = p.nextToken - - var limit *int32 - if p.options.Limit > 0 { - limit = &p.options.Limit - } - params.MaxRecords = limit - - result, err := p.client.DescribeAlarms(ctx, ¶ms, optFns...) - if err != nil { - return nil, err - } - p.firstPage = false - - prevToken := p.nextToken - p.nextToken = result.NextToken - - if p.options.StopOnDuplicateToken && - prevToken != nil && - p.nextToken != nil && - *prevToken == *p.nextToken { - p.nextToken = nil - } - - return result, nil -} - // AlarmExistsWaiterOptions are waiter options for AlarmExistsWaiter type AlarmExistsWaiterOptions struct { @@ -317,12 +245,13 @@ type AlarmExistsWaiterOptions struct { // Retryable is function that can be used to override the service defined // waiter-behavior based on operation output, or returned error. This function is - // used by the waiter to decide if a state is retryable or a terminal state. By - // default service-modeled logic will populate this option. This option can thus be - // used to define a custom waiter state with fall-back to service-modeled waiter - // state mutators.The function returns an error in case of a failure state. In case - // of retry state, this function returns a bool value of true and nil error, while - // in case of success it returns a bool value of false and nil error. + // used by the waiter to decide if a state is retryable or a terminal state. + // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. + // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. 
Retryable func(context.Context, *DescribeAlarmsInput, *DescribeAlarmsOutput, error) (bool, error) } @@ -398,7 +327,13 @@ func (w *AlarmExistsWaiter) WaitForOutput(ctx context.Context, params *DescribeA } out, err := w.client.DescribeAlarms(ctx, params, func(o *Options) { + baseOpts := []func(*Options){ + addIsWaiterUserAgent, + } o.APIOptions = append(o.APIOptions, apiOptions...) + for _, opt := range baseOpts { + opt(o) + } for _, opt := range options.ClientOptions { opt(o) } @@ -494,12 +429,13 @@ type CompositeAlarmExistsWaiterOptions struct { // Retryable is function that can be used to override the service defined // waiter-behavior based on operation output, or returned error. This function is - // used by the waiter to decide if a state is retryable or a terminal state. By - // default service-modeled logic will populate this option. This option can thus be - // used to define a custom waiter state with fall-back to service-modeled waiter - // state mutators.The function returns an error in case of a failure state. In case - // of retry state, this function returns a bool value of true and nil error, while - // in case of success it returns a bool value of false and nil error. + // used by the waiter to decide if a state is retryable or a terminal state. + // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. + // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. Retryable func(context.Context, *DescribeAlarmsInput, *DescribeAlarmsOutput, error) (bool, error) } @@ -576,7 +512,13 @@ func (w *CompositeAlarmExistsWaiter) WaitForOutput(ctx context.Context, params * } out, err := w.client.DescribeAlarms(ctx, params, func(o *Options) { + baseOpts := []func(*Options){ + addIsWaiterUserAgent, + } o.APIOptions = append(o.APIOptions, apiOptions...) + for _, opt := range baseOpts { + opt(o) + } for _, opt := range options.ClientOptions { opt(o) } @@ -638,6 +580,99 @@ func compositeAlarmExistsStateRetryable(ctx context.Context, input *DescribeAlar return true, nil } +// DescribeAlarmsPaginatorOptions is the paginator options for DescribeAlarms +type DescribeAlarmsPaginatorOptions struct { + // The maximum number of alarm descriptions to retrieve. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// DescribeAlarmsPaginator is a paginator for DescribeAlarms +type DescribeAlarmsPaginator struct { + options DescribeAlarmsPaginatorOptions + client DescribeAlarmsAPIClient + params *DescribeAlarmsInput + nextToken *string + firstPage bool +} + +// NewDescribeAlarmsPaginator returns a new DescribeAlarmsPaginator +func NewDescribeAlarmsPaginator(client DescribeAlarmsAPIClient, params *DescribeAlarmsInput, optFns ...func(*DescribeAlarmsPaginatorOptions)) *DescribeAlarmsPaginator { + if params == nil { + params = &DescribeAlarmsInput{} + } + + options := DescribeAlarmsPaginatorOptions{} + if params.MaxRecords != nil { + options.Limit = *params.MaxRecords + } + + for _, fn := range optFns { + fn(&options) + } + + return &DescribeAlarmsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *DescribeAlarmsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next DescribeAlarms page. +func (p *DescribeAlarmsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*DescribeAlarmsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxRecords = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.DescribeAlarms(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// DescribeAlarmsAPIClient is a client that implements the DescribeAlarms +// operation. +type DescribeAlarmsAPIClient interface { + DescribeAlarms(context.Context, *DescribeAlarmsInput, ...func(*Options)) (*DescribeAlarmsOutput, error) +} + +var _ DescribeAlarmsAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opDescribeAlarms(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DescribeAlarmsForMetric.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DescribeAlarmsForMetric.go index 10f6a7891c3..f2efc2d24d6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DescribeAlarmsForMetric.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DescribeAlarmsForMetric.go @@ -12,10 +12,11 @@ import ( ) // Retrieves the alarms for the specified metric. To filter the results, specify a -// statistic, period, or unit. This operation retrieves only standard alarms that -// are based on the specified metric. It does not return alarms based on math -// expressions that use the specified metric, or composite alarms that use the -// specified metric. +// statistic, period, or unit. +// +// This operation retrieves only standard alarms that are based on the specified +// metric. It does not return alarms based on math expressions that use the +// specified metric, or composite alarms that use the specified metric. 
func (c *Client) DescribeAlarmsForMetric(ctx context.Context, params *DescribeAlarmsForMetricInput, optFns ...func(*Options)) (*DescribeAlarmsForMetricOutput, error) { if params == nil { params = &DescribeAlarmsForMetricInput{} @@ -129,6 +130,12 @@ func (c *Client) addOperationDescribeAlarmsForMetricMiddlewares(stack *middlewar if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDescribeAlarmsForMetricValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DescribeAnomalyDetectors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DescribeAnomalyDetectors.go index 2b7d514966a..52be715d78d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DescribeAnomalyDetectors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DescribeAnomalyDetectors.go @@ -45,8 +45,10 @@ type DescribeAnomalyDetectorsInput struct { Dimensions []types.Dimension // The maximum number of results to return in one operation. The maximum value - // that you can specify is 100. To retrieve the remaining results, make another - // call with the returned NextToken value. + // that you can specify is 100. + // + // To retrieve the remaining results, make another call with the returned NextToken + // value. MaxResults *int32 // Limits the results to only the anomaly detection models that are associated @@ -135,6 +137,12 @@ func (c *Client) addOperationDescribeAnomalyDetectorsMiddlewares(stack *middlewa if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDescribeAnomalyDetectorsValidationMiddleware(stack); err != nil { return err } @@ -159,20 +167,14 @@ func (c *Client) addOperationDescribeAnomalyDetectorsMiddlewares(stack *middlewa return nil } -// DescribeAnomalyDetectorsAPIClient is a client that implements the -// DescribeAnomalyDetectors operation. -type DescribeAnomalyDetectorsAPIClient interface { - DescribeAnomalyDetectors(context.Context, *DescribeAnomalyDetectorsInput, ...func(*Options)) (*DescribeAnomalyDetectorsOutput, error) -} - -var _ DescribeAnomalyDetectorsAPIClient = (*Client)(nil) - // DescribeAnomalyDetectorsPaginatorOptions is the paginator options for // DescribeAnomalyDetectors type DescribeAnomalyDetectorsPaginatorOptions struct { // The maximum number of results to return in one operation. The maximum value - // that you can specify is 100. To retrieve the remaining results, make another - // call with the returned NextToken value. + // that you can specify is 100. + // + // To retrieve the remaining results, make another call with the returned NextToken + // value. Limit int32 // Set to true if pagination should stop if the service returns a pagination token @@ -234,6 +236,9 @@ func (p *DescribeAnomalyDetectorsPaginator) NextPage(ctx context.Context, optFns } params.MaxResults = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.DescribeAnomalyDetectors(ctx, ¶ms, optFns...) 
if err != nil { return nil, err @@ -253,6 +258,14 @@ func (p *DescribeAnomalyDetectorsPaginator) NextPage(ctx context.Context, optFns return result, nil } +// DescribeAnomalyDetectorsAPIClient is a client that implements the +// DescribeAnomalyDetectors operation. +type DescribeAnomalyDetectorsAPIClient interface { + DescribeAnomalyDetectors(context.Context, *DescribeAnomalyDetectorsInput, ...func(*Options)) (*DescribeAnomalyDetectorsOutput, error) +} + +var _ DescribeAnomalyDetectorsAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opDescribeAnomalyDetectors(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DescribeInsightRules.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DescribeInsightRules.go index b0956ebe2ad..a1eaff834a6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DescribeInsightRules.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DescribeInsightRules.go @@ -11,10 +11,11 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns a list of all the Contributor Insights rules in your account. For more -// information about Contributor Insights, see Using Contributor Insights to -// Analyze High-Cardinality Data (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContributorInsights.html) -// . +// Returns a list of all the Contributor Insights rules in your account. +// +// For more information about Contributor Insights, see [Using Contributor Insights to Analyze High-Cardinality Data]. +// +// [Using Contributor Insights to Analyze High-Cardinality Data]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContributorInsights.html func (c *Client) DescribeInsightRules(ctx context.Context, params *DescribeInsightRulesInput, optFns ...func(*Options)) (*DescribeInsightRulesOutput, error) { if params == nil { params = &DescribeInsightRulesInput{} @@ -113,6 +114,12 @@ func (c *Client) addOperationDescribeInsightRulesMiddlewares(stack *middleware.S if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeInsightRules(options.Region), middleware.Before); err != nil { return err } @@ -134,14 +141,6 @@ func (c *Client) addOperationDescribeInsightRulesMiddlewares(stack *middleware.S return nil } -// DescribeInsightRulesAPIClient is a client that implements the -// DescribeInsightRules operation. -type DescribeInsightRulesAPIClient interface { - DescribeInsightRules(context.Context, *DescribeInsightRulesInput, ...func(*Options)) (*DescribeInsightRulesOutput, error) -} - -var _ DescribeInsightRulesAPIClient = (*Client)(nil) - // DescribeInsightRulesPaginatorOptions is the paginator options for // DescribeInsightRules type DescribeInsightRulesPaginatorOptions struct { @@ -207,6 +206,9 @@ func (p *DescribeInsightRulesPaginator) NextPage(ctx context.Context, optFns ... } params.MaxResults = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.DescribeInsightRules(ctx, ¶ms, optFns...) if err != nil { return nil, err @@ -226,6 +228,14 @@ func (p *DescribeInsightRulesPaginator) NextPage(ctx context.Context, optFns ... 
return result, nil } +// DescribeInsightRulesAPIClient is a client that implements the +// DescribeInsightRules operation. +type DescribeInsightRulesAPIClient interface { + DescribeInsightRules(context.Context, *DescribeInsightRulesInput, ...func(*Options)) (*DescribeInsightRulesOutput, error) +} + +var _ DescribeInsightRulesAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opDescribeInsightRules(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DisableAlarmActions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DisableAlarmActions.go index cc1d538e42f..22aa5c3ef23 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DisableAlarmActions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DisableAlarmActions.go @@ -99,6 +99,12 @@ func (c *Client) addOperationDisableAlarmActionsMiddlewares(stack *middleware.St if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDisableAlarmActionsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DisableInsightRules.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DisableInsightRules.go index 9de449e1ef8..ff642cc985b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DisableInsightRules.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_DisableInsightRules.go @@ -31,8 +31,9 @@ func (c *Client) DisableInsightRules(ctx context.Context, params *DisableInsight type DisableInsightRulesInput struct { // An array of the rule names to disable. If you need to find out the names of - // your rules, use DescribeInsightRules (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_DescribeInsightRules.html) - // . + // your rules, use [DescribeInsightRules]. + // + // [DescribeInsightRules]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_DescribeInsightRules.html // // This member is required. 
RuleNames []string @@ -107,6 +108,12 @@ func (c *Client) addOperationDisableInsightRulesMiddlewares(stack *middleware.St if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDisableInsightRulesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_EnableAlarmActions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_EnableAlarmActions.go index 5c0a98f5d24..71a83b61f7b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_EnableAlarmActions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_EnableAlarmActions.go @@ -98,6 +98,12 @@ func (c *Client) addOperationEnableAlarmActionsMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpEnableAlarmActionsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_EnableInsightRules.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_EnableInsightRules.go index 9e25e9f982f..a4e5f65b80b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_EnableInsightRules.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_EnableInsightRules.go @@ -31,8 +31,9 @@ func (c *Client) EnableInsightRules(ctx context.Context, params *EnableInsightRu type EnableInsightRulesInput struct { // An array of the rule names to enable. If you need to find out the names of your - // rules, use DescribeInsightRules (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_DescribeInsightRules.html) - // . + // rules, use [DescribeInsightRules]. + // + // [DescribeInsightRules]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_DescribeInsightRules.html // // This member is required. RuleNames []string @@ -107,6 +108,12 @@ func (c *Client) addOperationEnableInsightRulesMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpEnableInsightRulesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_GetDashboard.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_GetDashboard.go index d31711302ad..0835fe77a32 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_GetDashboard.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_GetDashboard.go @@ -10,10 +10,11 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Displays the details of the dashboard that you specify. To copy an existing -// dashboard, use GetDashboard , and then use the data returned within -// DashboardBody as the template for the new dashboard when you call PutDashboard -// to create the copy. +// Displays the details of the dashboard that you specify. 
+// +// To copy an existing dashboard, use GetDashboard , and then use the data returned +// within DashboardBody as the template for the new dashboard when you call +// PutDashboard to create the copy. func (c *Client) GetDashboard(ctx context.Context, params *GetDashboardInput, optFns ...func(*Options)) (*GetDashboardOutput, error) { if params == nil { params = &GetDashboardInput{} @@ -46,8 +47,9 @@ type GetDashboardOutput struct { // The detailed information about the dashboard, including what widgets are // included and their location on the dashboard. For more information about the - // DashboardBody syntax, see Dashboard Body Structure and Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/CloudWatch-Dashboard-Body-Structure.html) - // . + // DashboardBody syntax, see [Dashboard Body Structure and Syntax]. + // + // [Dashboard Body Structure and Syntax]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/CloudWatch-Dashboard-Body-Structure.html DashboardBody *string // The name of the dashboard. @@ -114,6 +116,12 @@ func (c *Client) addOperationGetDashboardMiddlewares(stack *middleware.Stack, op if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpGetDashboardValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_GetInsightRuleReport.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_GetInsightRuleReport.go index d7f4e8ff4bf..40a141b2a07 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_GetInsightRuleReport.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_GetInsightRuleReport.go @@ -14,24 +14,35 @@ import ( // This operation returns the time series data collected by a Contributor Insights // rule. The data includes the identity and number of contributors to the log -// group. You can also optionally return one or more statistics about each data -// point in the time series. These statistics can include the following: +// group. +// +// You can also optionally return one or more statistics about each data point in +// the time series. These statistics can include the following: +// // - UniqueContributors -- the number of unique contributors for each data point. +// // - MaxContributorValue -- the value of the top contributor for each data point. // The identity of the contributor might change for each data point in the graph. -// If this rule aggregates by COUNT, the top contributor for each data point is the -// contributor with the most occurrences in that period. If the rule aggregates by -// SUM, the top contributor is the contributor with the highest sum in the log -// field specified by the rule's Value , during that period. -// - SampleCount -- the number of data points matched by the rule. -// - Sum -- the sum of the values from all contributors during the time period -// represented by that data point. -// - Minimum -- the minimum value from a single observation during the time -// period represented by that data point. -// - Maximum -- the maximum value from a single observation during the time -// period represented by that data point. -// - Average -- the average value from all contributors during the time period -// represented by that data point. 
+// +// If this rule aggregates by COUNT, the top contributor for each data point is +// +// the contributor with the most occurrences in that period. If the rule aggregates +// by SUM, the top contributor is the contributor with the highest sum in the log +// field specified by the rule's Value , during that period. +// +// - SampleCount -- the number of data points matched by the rule. +// +// - Sum -- the sum of the values from all contributors during the time period +// represented by that data point. +// +// - Minimum -- the minimum value from a single observation during the time +// period represented by that data point. +// +// - Maximum -- the maximum value from a single observation during the time +// period represented by that data point. +// +// - Average -- the average value from all contributors during the time period +// represented by that data point. func (c *Client) GetInsightRuleReport(ctx context.Context, params *GetInsightRuleReportInput, optFns ...func(*Options)) (*GetInsightRuleReportOutput, error) { if params == nil { params = &GetInsightRuleReportInput{} @@ -80,20 +91,28 @@ type GetInsightRuleReportInput struct { // Specifies which metrics to use for aggregation of contributor values for the // report. You can specify one or more of the following metrics: + // // - UniqueContributors -- the number of unique contributors for each data point. + // // - MaxContributorValue -- the value of the top contributor for each data point. // The identity of the contributor might change for each data point in the graph. - // If this rule aggregates by COUNT, the top contributor for each data point is the - // contributor with the most occurrences in that period. If the rule aggregates by - // SUM, the top contributor is the contributor with the highest sum in the log + // + // If this rule aggregates by COUNT, the top contributor for each data point is + // the contributor with the most occurrences in that period. If the rule aggregates + // by SUM, the top contributor is the contributor with the highest sum in the log // field specified by the rule's Value , during that period. + // // - SampleCount -- the number of data points matched by the rule. + // // - Sum -- the sum of the values from all contributors during the time period // represented by that data point. + // // - Minimum -- the minimum value from a single observation during the time // period represented by that data point. + // // - Maximum -- the maximum value from a single observation during the time // period represented by that data point. + // // - Average -- the average value from all contributors during the time period // represented by that data point. 
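Editorial aside, not part of the vendored patch: the statistic names listed above are passed verbatim through the Metrics field that follows this note. A minimal sketch of such a call, assuming a hypothetical rule name and a recent one-hour window, and using only exported aws-sdk-go-v2 calls, could look like this:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/cloudwatch"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := cloudwatch.NewFromConfig(cfg)

	end := time.Now()
	start := end.Add(-1 * time.Hour)
	out, err := client.GetInsightRuleReport(ctx, &cloudwatch.GetInsightRuleReportInput{
		RuleName:  aws.String("my-contributor-rule"), // hypothetical rule name
		StartTime: &start,
		EndTime:   &end,
		Period:    aws.Int32(300),
		// Statistic names come from the list documented above.
		Metrics: []string{"UniqueContributors", "MaxContributorValue", "Sum"},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("contributors:", len(out.Contributors), "datapoints:", len(out.MetricDatapoints))
}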
Metrics []string @@ -193,6 +212,12 @@ func (c *Client) addOperationGetInsightRuleReportMiddlewares(stack *middleware.S if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpGetInsightRuleReportValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_GetMetricData.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_GetMetricData.go index c1dc7d95fcd..906a271cb14 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_GetMetricData.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_GetMetricData.go @@ -14,31 +14,37 @@ import ( // You can use the GetMetricData API to retrieve CloudWatch metric values. The // operation can also include a CloudWatch Metrics Insights query, and one or more -// metric math functions. A GetMetricData operation that does not include a query -// can retrieve as many as 500 different metrics in a single request, with a total -// of as many as 100,800 data points. You can also optionally perform metric math -// expressions on the values of the returned statistics, to create new time series -// that represent new insights into your data. For example, using Lambda metrics, -// you could divide the Errors metric by the Invocations metric to get an error -// rate time series. For more information about metric math expressions, see -// Metric Math Syntax and Functions (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/using-metric-math.html#metric-math-syntax) -// in the Amazon CloudWatch User Guide. If you include a Metrics Insights query, -// each GetMetricData operation can include only one query. But the same -// GetMetricData operation can also retrieve other metrics. Metrics Insights -// queries can query only the most recent three hours of metric data. For more -// information about Metrics Insights, see Query your metrics with CloudWatch -// Metrics Insights (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/query_with_cloudwatch-metrics-insights.html) -// . Calls to the GetMetricData API have a different pricing structure than calls -// to GetMetricStatistics . For more information about pricing, see Amazon -// CloudWatch Pricing (https://aws.amazon.com/cloudwatch/pricing/) . Amazon -// CloudWatch retains metric data as follows: +// metric math functions. +// +// A GetMetricData operation that does not include a query can retrieve as many as +// 500 different metrics in a single request, with a total of as many as 100,800 +// data points. You can also optionally perform metric math expressions on the +// values of the returned statistics, to create new time series that represent new +// insights into your data. For example, using Lambda metrics, you could divide the +// Errors metric by the Invocations metric to get an error rate time series. For +// more information about metric math expressions, see [Metric Math Syntax and Functions]in the Amazon CloudWatch +// User Guide. +// +// If you include a Metrics Insights query, each GetMetricData operation can +// include only one query. But the same GetMetricData operation can also retrieve +// other metrics. Metrics Insights queries can query only the most recent three +// hours of metric data. 
For more information about Metrics Insights, see [Query your metrics with CloudWatch Metrics Insights]. +// +// Calls to the GetMetricData API have a different pricing structure than calls to +// GetMetricStatistics . For more information about pricing, see [Amazon CloudWatch Pricing]. +// +// Amazon CloudWatch retains metric data as follows: +// // - Data points with a period of less than 60 seconds are available for 3 // hours. These data points are high-resolution metrics and are available only for // custom metrics that have been defined with a StorageResolution of 1. +// // - Data points with a period of 60 seconds (1-minute) are available for 15 // days. +// // - Data points with a period of 300 seconds (5-minute) are available for 63 // days. +// // - Data points with a period of 3600 seconds (1 hour) are available for 455 // days (15 months). // @@ -47,20 +53,28 @@ import ( // of 1 minute, the data remains available for 15 days with 1-minute resolution. // After 15 days, this data is still available, but is aggregated and retrievable // only with a resolution of 5 minutes. After 63 days, the data is further -// aggregated and is available with a resolution of 1 hour. If you omit Unit in -// your request, all data that was collected with any unit is returned, along with -// the corresponding units that were specified when the data was reported to -// CloudWatch. If you specify a unit, the operation returns only data that was -// collected with that unit specified. If you specify a unit that does not match -// the data collected, the results of the operation are null. CloudWatch does not -// perform unit conversions. Using Metrics Insights queries with metric math You -// can't mix a Metric Insights query and metric math syntax in the same expression, -// but you can reference results from a Metrics Insights query within other Metric -// math expressions. A Metrics Insights query without a GROUP BY clause returns a -// single time-series (TS), and can be used as input for a metric math expression -// that expects a single time series. A Metrics Insights query with a GROUP BY -// clause returns an array of time-series (TS[]), and can be used as input for a -// metric math expression that expects an array of time series. +// aggregated and is available with a resolution of 1 hour. +// +// If you omit Unit in your request, all data that was collected with any unit is +// returned, along with the corresponding units that were specified when the data +// was reported to CloudWatch. If you specify a unit, the operation returns only +// data that was collected with that unit specified. If you specify a unit that +// does not match the data collected, the results of the operation are null. +// CloudWatch does not perform unit conversions. +// +// # Using Metrics Insights queries with metric math +// +// You can't mix a Metric Insights query and metric math syntax in the same +// expression, but you can reference results from a Metrics Insights query within +// other Metric math expressions. A Metrics Insights query without a GROUP BY +// clause returns a single time-series (TS), and can be used as input for a metric +// math expression that expects a single time series. A Metrics Insights query with +// a GROUP BY clause returns an array of time-series (TS[]), and can be used as +// input for a metric math expression that expects an array of time series. 
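Editorial aside, not part of the vendored patch: the behaviour described above (metric math such as an error-rate expression, plus MaxDatapoints-driven paging) is easier to see in a small sketch. The Lambda function name and the three-hour window below are assumptions; the paginator used is the exported GetMetricDataPaginator whose NextPage changes appear later in this file, and the client is assumed to come from cloudwatch.NewFromConfig.

package cwexample

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/cloudwatch"
	"github.com/aws/aws-sdk-go-v2/service/cloudwatch/types"
)

// lambdaErrorRate retrieves an error-rate series computed with metric math and
// lets the GetMetricData paginator handle MaxDatapoints/NextToken paging.
func lambdaErrorRate(ctx context.Context, client *cloudwatch.Client) error {
	dims := []types.Dimension{{Name: aws.String("FunctionName"), Value: aws.String("lambda-fn")}}
	sum := func(metric string) *types.MetricStat {
		return &types.MetricStat{
			Metric: &types.Metric{
				Namespace:  aws.String("AWS/Lambda"),
				MetricName: aws.String(metric),
				Dimensions: dims,
			},
			Period: aws.Int32(300),
			Stat:   aws.String("Sum"),
		}
	}

	end := time.Now()
	start := end.Add(-3 * time.Hour)
	p := cloudwatch.NewGetMetricDataPaginator(client, &cloudwatch.GetMetricDataInput{
		StartTime: &start,
		EndTime:   &end,
		MetricDataQueries: []types.MetricDataQuery{
			{Id: aws.String("errs"), MetricStat: sum("Errors"), ReturnData: aws.Bool(false)},
			{Id: aws.String("calls"), MetricStat: sum("Invocations"), ReturnData: aws.Bool(false)},
			// The metric math expression described in the doc comment above.
			{Id: aws.String("errorRate"), Expression: aws.String("100 * errs / calls"), Label: aws.String("Error rate (%)")},
		},
	})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, r := range page.MetricDataResults {
			fmt.Println(aws.ToString(r.Label), len(r.Values), "values")
		}
	}
	return nil
}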
+// +// [Metric Math Syntax and Functions]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/using-metric-math.html#metric-math-syntax +// [Query your metrics with CloudWatch Metrics Insights]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/query_with_cloudwatch-metrics-insights.html +// [Amazon CloudWatch Pricing]: https://aws.amazon.com/cloudwatch/pricing/ func (c *Client) GetMetricData(ctx context.Context, params *GetMetricDataInput, optFns ...func(*Options)) (*GetMetricDataOutput, error) { if params == nil { params = &GetMetricDataInput{} @@ -78,13 +92,16 @@ func (c *Client) GetMetricData(ctx context.Context, params *GetMetricDataInput, type GetMetricDataInput struct { - // The time stamp indicating the latest data to be returned. The value specified - // is exclusive; results include data points up to the specified time stamp. For - // better performance, specify StartTime and EndTime values that align with the - // value of the metric's Period and sync up with the beginning and end of an hour. - // For example, if the Period of a metric is 5 minutes, specifying 12:05 or 12:30 - // as EndTime can get a faster response from CloudWatch than setting 12:07 or - // 12:29 as the EndTime . + // The time stamp indicating the latest data to be returned. + // + // The value specified is exclusive; results include data points up to the + // specified time stamp. + // + // For better performance, specify StartTime and EndTime values that align with + // the value of the metric's Period and sync up with the beginning and end of an + // hour. For example, if the Period of a metric is 5 minutes, specifying 12:05 or + // 12:30 as EndTime can get a faster response from CloudWatch than setting 12:07 + // or 12:29 as the EndTime . // // This member is required. EndTime *time.Time @@ -97,26 +114,35 @@ type GetMetricDataInput struct { // This member is required. MetricDataQueries []types.MetricDataQuery - // The time stamp indicating the earliest data to be returned. The value specified - // is inclusive; results include data points with the specified time stamp. + // The time stamp indicating the earliest data to be returned. + // + // The value specified is inclusive; results include data points with the + // specified time stamp. + // // CloudWatch rounds the specified time stamp as follows: + // // - Start time less than 15 days ago - Round down to the nearest whole minute. // For example, 12:32:34 is rounded down to 12:32:00. + // // - Start time between 15 and 63 days ago - Round down to the nearest 5-minute // clock interval. For example, 12:32:34 is rounded down to 12:30:00. + // // - Start time greater than 63 days ago - Round down to the nearest 1-hour // clock interval. For example, 12:32:34 is rounded down to 12:00:00. + // // If you set Period to 5, 10, or 30, the start time of your request is rounded // down to the nearest time that corresponds to even 5-, 10-, or 30-second // divisions of a minute. For example, if you make a query at (HH:mm:ss) 01:05:23 // for the previous 10-second period, the start time of your request is rounded // down and you receive data from 01:05:10 to 01:05:20. If you make a query at // 15:07:17 for the previous 5 minutes of data, using a period of 5 seconds, you - // receive data timestamped between 15:02:15 and 15:07:15. For better performance, - // specify StartTime and EndTime values that align with the value of the metric's - // Period and sync up with the beginning and end of an hour. 
For example, if the - // Period of a metric is 5 minutes, specifying 12:05 or 12:30 as StartTime can get - // a faster response from CloudWatch than setting 12:07 or 12:29 as the StartTime . + // receive data timestamped between 15:02:15 and 15:07:15. + // + // For better performance, specify StartTime and EndTime values that align with + // the value of the metric's Period and sync up with the beginning and end of an + // hour. For example, if the Period of a metric is 5 minutes, specifying 12:05 or + // 12:30 as StartTime can get a faster response from CloudWatch than setting 12:07 + // or 12:29 as the StartTime . // // This member is required. StartTime *time.Time @@ -137,8 +163,9 @@ type GetMetricDataInput struct { // The order in which data points should be returned. TimestampDescending returns // the newest data first and paginates when the MaxDatapoints limit is reached. // TimestampAscending returns the oldest data first and paginates when the - // MaxDatapoints limit is reached. If you omit this parameter, the default of - // TimestampDescending is used. + // MaxDatapoints limit is reached. + // + // If you omit this parameter, the default of TimestampDescending is used. ScanBy types.ScanBy noSmithyDocumentSerde @@ -149,10 +176,11 @@ type GetMetricDataOutput struct { // Contains a message about this GetMetricData operation, if the operation results // in such a message. An example of a message that might be returned is Maximum // number of allowed metrics exceeded . If there is a message, as much of the - // operation as possible is still executed. A message appears here only if it is - // related to the global GetMetricData operation. Any message about a specific - // metric returned by the operation appears in the MetricDataResult object - // returned for that metric. + // operation as possible is still executed. + // + // A message appears here only if it is related to the global GetMetricData + // operation. Any message about a specific metric returned by the operation appears + // in the MetricDataResult object returned for that metric. Messages []types.MessageData // The metrics that are returned, including the metric name, namespace, and @@ -223,6 +251,12 @@ func (c *Client) addOperationGetMetricDataMiddlewares(stack *middleware.Stack, o if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpGetMetricDataValidationMiddleware(stack); err != nil { return err } @@ -247,13 +281,6 @@ func (c *Client) addOperationGetMetricDataMiddlewares(stack *middleware.Stack, o return nil } -// GetMetricDataAPIClient is a client that implements the GetMetricData operation. -type GetMetricDataAPIClient interface { - GetMetricData(context.Context, *GetMetricDataInput, ...func(*Options)) (*GetMetricDataOutput, error) -} - -var _ GetMetricDataAPIClient = (*Client)(nil) - // GetMetricDataPaginatorOptions is the paginator options for GetMetricData type GetMetricDataPaginatorOptions struct { // The maximum number of data points the request should return before paginating. @@ -318,6 +345,9 @@ func (p *GetMetricDataPaginator) NextPage(ctx context.Context, optFns ...func(*O } params.MaxDatapoints = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.GetMetricData(ctx, ¶ms, optFns...) 
if err != nil { return nil, err @@ -337,6 +367,13 @@ func (p *GetMetricDataPaginator) NextPage(ctx context.Context, optFns ...func(*O return result, nil } +// GetMetricDataAPIClient is a client that implements the GetMetricData operation. +type GetMetricDataAPIClient interface { + GetMetricData(context.Context, *GetMetricDataInput, ...func(*Options)) (*GetMetricDataOutput, error) +} + +var _ GetMetricDataAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opGetMetricData(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_GetMetricStatistics.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_GetMetricStatistics.go index 5ace4e4d187..371f02a0edc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_GetMetricStatistics.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_GetMetricStatistics.go @@ -12,31 +12,43 @@ import ( "time" ) -// Gets statistics for the specified metric. The maximum number of data points -// returned from a single call is 1,440. If you request more than 1,440 data -// points, CloudWatch returns an error. To reduce the number of data points, you -// can narrow the specified time range and make multiple requests across adjacent -// time ranges, or you can increase the specified period. Data points are not -// returned in chronological order. CloudWatch aggregates data points based on the -// length of the period that you specify. For example, if you request statistics -// with a one-hour period, CloudWatch aggregates all data points with time stamps -// that fall within each one-hour period. Therefore, the number of values -// aggregated by CloudWatch is larger than the number of data points returned. +// Gets statistics for the specified metric. +// +// The maximum number of data points returned from a single call is 1,440. If you +// request more than 1,440 data points, CloudWatch returns an error. To reduce the +// number of data points, you can narrow the specified time range and make multiple +// requests across adjacent time ranges, or you can increase the specified period. +// Data points are not returned in chronological order. +// +// CloudWatch aggregates data points based on the length of the period that you +// specify. For example, if you request statistics with a one-hour period, +// CloudWatch aggregates all data points with time stamps that fall within each +// one-hour period. Therefore, the number of values aggregated by CloudWatch is +// larger than the number of data points returned. +// // CloudWatch needs raw data points to calculate percentile statistics. If you // publish data using a statistic set instead, you can only retrieve percentile // statistics for this data if one of the following conditions is true: +// // - The SampleCount value of the statistic set is 1. +// // - The Min and the Max values of the statistic set are equal. // // Percentile statistics are not available for metrics when any of the metric -// values are negative numbers. Amazon CloudWatch retains metric data as follows: +// values are negative numbers. +// +// Amazon CloudWatch retains metric data as follows: +// // - Data points with a period of less than 60 seconds are available for 3 // hours. These data points are high-resolution metrics and are available only for // custom metrics that have been defined with a StorageResolution of 1. 
+// // - Data points with a period of 60 seconds (1-minute) are available for 15 // days. +// // - Data points with a period of 300 seconds (5-minute) are available for 63 // days. +// // - Data points with a period of 3600 seconds (1 hour) are available for 455 // days (15 months). // @@ -45,11 +57,14 @@ import ( // of 1 minute, the data remains available for 15 days with 1-minute resolution. // After 15 days, this data is still available, but is aggregated and retrievable // only with a resolution of 5 minutes. After 63 days, the data is further -// aggregated and is available with a resolution of 1 hour. CloudWatch started -// retaining 5-minute and 1-hour metric data as of July 9, 2016. For information -// about metrics and dimensions supported by Amazon Web Services services, see the -// Amazon CloudWatch Metrics and Dimensions Reference (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CW_Support_For_AWS.html) -// in the Amazon CloudWatch User Guide. +// aggregated and is available with a resolution of 1 hour. +// +// CloudWatch started retaining 5-minute and 1-hour metric data as of July 9, 2016. +// +// For information about metrics and dimensions supported by Amazon Web Services +// services, see the [Amazon CloudWatch Metrics and Dimensions Reference]in the Amazon CloudWatch User Guide. +// +// [Amazon CloudWatch Metrics and Dimensions Reference]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CW_Support_For_AWS.html func (c *Client) GetMetricStatistics(ctx context.Context, params *GetMetricStatisticsInput, optFns ...func(*Options)) (*GetMetricStatisticsOutput, error) { if params == nil { params = &GetMetricStatisticsInput{} @@ -67,10 +82,11 @@ func (c *Client) GetMetricStatistics(ctx context.Context, params *GetMetricStati type GetMetricStatisticsInput struct { - // The time stamp that determines the last data point to return. The value - // specified is exclusive; results include data points up to the specified time - // stamp. In a raw HTTP query, the time stamp must be in ISO 8601 UTC format (for - // example, 2016-10-10T23:00:00Z). + // The time stamp that determines the last data point to return. + // + // The value specified is exclusive; results include data points up to the + // specified time stamp. In a raw HTTP query, the time stamp must be in ISO 8601 + // UTC format (for example, 2016-10-10T23:00:00Z). // // This member is required. EndTime *time.Time @@ -90,13 +106,18 @@ type GetMetricStatisticsInput struct { // be a multiple of 60. For high-resolution metrics that are collected at intervals // of less than one minute, the period can be 1, 5, 10, 30, 60, or any multiple of // 60. High-resolution metrics are those metrics stored by a PutMetricData call - // that includes a StorageResolution of 1 second. If the StartTime parameter - // specifies a time stamp that is greater than 3 hours ago, you must specify the - // period as follows or no data points in that time range is returned: + // that includes a StorageResolution of 1 second. + // + // If the StartTime parameter specifies a time stamp that is greater than 3 hours + // ago, you must specify the period as follows or no data points in that time range + // is returned: + // // - Start time between 3 hours and 15 days ago - Use a multiple of 60 seconds // (1 minute). + // // - Start time between 15 and 63 days ago - Use a multiple of 300 seconds (5 // minutes). + // // - Start time greater than 63 days ago - Use a multiple of 3600 seconds (1 // hour). 
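Editorial aside, not part of the vendored patch: a short sketch of GetMetricStatistics that follows the Period and time-alignment guidance above; the EC2 instance ID is a placeholder, and the snippet assumes a *cloudwatch.Client built with NewFromConfig.

package cwexample

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/cloudwatch"
	"github.com/aws/aws-sdk-go-v2/service/cloudwatch/types"
)

// hourlyCPU fetches 5-minute CPU averages for one instance over the last day.
func hourlyCPU(ctx context.Context, client *cloudwatch.Client) error {
	// Align the window with whole hours and use a 300-second period, as the
	// doc comment above recommends for start times within the last 15 days.
	end := time.Now().Truncate(time.Hour)
	start := end.Add(-24 * time.Hour)

	out, err := client.GetMetricStatistics(ctx, &cloudwatch.GetMetricStatisticsInput{
		Namespace:  aws.String("AWS/EC2"),
		MetricName: aws.String("CPUUtilization"),
		Dimensions: []types.Dimension{
			{Name: aws.String("InstanceId"), Value: aws.String("i-0123456789abcdef0")}, // placeholder
		},
		StartTime:  &start,
		EndTime:    &end,
		Period:     aws.Int32(300),
		Statistics: []types.Statistic{types.StatisticAverage, types.StatisticMaximum},
	})
	if err != nil {
		return err
	}
	for _, dp := range out.Datapoints {
		fmt.Println(dp.Timestamp.Format(time.RFC3339), aws.ToFloat64(dp.Average))
	}
	return nil
}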
// @@ -104,17 +125,23 @@ type GetMetricStatisticsInput struct { Period *int32 // The time stamp that determines the first data point to return. Start times are - // evaluated relative to the time that CloudWatch receives the request. The value - // specified is inclusive; results include data points with the specified time - // stamp. In a raw HTTP query, the time stamp must be in ISO 8601 UTC format (for - // example, 2016-10-03T23:00:00Z). CloudWatch rounds the specified time stamp as - // follows: + // evaluated relative to the time that CloudWatch receives the request. + // + // The value specified is inclusive; results include data points with the + // specified time stamp. In a raw HTTP query, the time stamp must be in ISO 8601 + // UTC format (for example, 2016-10-03T23:00:00Z). + // + // CloudWatch rounds the specified time stamp as follows: + // // - Start time less than 15 days ago - Round down to the nearest whole minute. // For example, 12:32:34 is rounded down to 12:32:00. + // // - Start time between 15 and 63 days ago - Round down to the nearest 5-minute // clock interval. For example, 12:32:34 is rounded down to 12:30:00. + // // - Start time greater than 63 days ago - Round down to the nearest 1-hour // clock interval. For example, 12:32:34 is rounded down to 12:00:00. + // // If you set Period to 5, 10, or 30, the start time of your request is rounded // down to the nearest time that corresponds to even 5-, 10-, or 30-second // divisions of a minute. For example, if you make a query at (HH:mm:ss) 01:05:23 @@ -130,11 +157,12 @@ type GetMetricStatisticsInput struct { // value for each dimension. CloudWatch treats each unique combination of // dimensions as a separate metric. If a specific combination of dimensions was not // published, you can't retrieve statistics for it. You must specify the same - // dimensions that were used when the metrics were created. For an example, see - // Dimension Combinations (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html#dimension-combinations) - // in the Amazon CloudWatch User Guide. For more information about specifying - // dimensions, see Publishing Metrics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html) - // in the Amazon CloudWatch User Guide. + // dimensions that were used when the metrics were created. For an example, see [Dimension Combinations]in + // the Amazon CloudWatch User Guide. For more information about specifying + // dimensions, see [Publishing Metrics]in the Amazon CloudWatch User Guide. + // + // [Dimension Combinations]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html#dimension-combinations + // [Publishing Metrics]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html Dimensions []types.Dimension // The percentile statistics. Specify values between p0.0 and p100. 
When calling @@ -228,6 +256,12 @@ func (c *Client) addOperationGetMetricStatisticsMiddlewares(stack *middleware.St if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpGetMetricStatisticsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_GetMetricStream.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_GetMetricStream.go index 84e4dfaeb40..79376b96496 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_GetMetricStream.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_GetMetricStream.go @@ -72,9 +72,10 @@ type GetMetricStreamOutput struct { Name *string // The output format for the stream. Valid values are json , opentelemetry1.0 , and - // opentelemetry0.7 . For more information about metric stream output formats, see - // Metric streams output formats (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-metric-streams-formats.html) + // opentelemetry0.7 . For more information about metric stream output formats, see [Metric streams output formats] // . + // + // [Metric streams output formats]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-metric-streams-formats.html OutputFormat types.MetricStreamOutputFormat // The ARN of the IAM role that is used by this metric stream. @@ -85,8 +86,9 @@ type GetMetricStreamOutput struct { // Each entry in this array displays information about one or more metrics that // include additional statistics in the metric stream. For more information about - // the additional statistics, see CloudWatch statistics definitions (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Statistics-definitions.html.html) - // . + // the additional statistics, see [CloudWatch statistics definitions]. + // + // [CloudWatch statistics definitions]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Statistics-definitions.html.html StatisticsConfigurations []types.MetricStreamStatisticsConfiguration // Metadata pertaining to the operation's result. @@ -150,6 +152,12 @@ func (c *Client) addOperationGetMetricStreamMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpGetMetricStreamValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_GetMetricWidgetImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_GetMetricWidgetImage.go index ff8376956b0..cc7dc52dac1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_GetMetricWidgetImage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_GetMetricWidgetImage.go @@ -14,11 +14,16 @@ import ( // more Amazon CloudWatch metrics as a bitmap image. You can then embed this image // into your services and products, such as wiki pages, reports, and documents. You // could also retrieve images regularly, such as every minute, and create your own -// custom live dashboard. 
The graph you retrieve can include all CloudWatch metric -// graph features, including metric math and horizontal and vertical annotations. +// custom live dashboard. +// +// The graph you retrieve can include all CloudWatch metric graph features, +// including metric math and horizontal and vertical annotations. +// // There is a limit of 20 transactions per second for this API. Each // GetMetricWidgetImage action has the following limits: +// // - As many as 100 metrics in the graph. +// // - Up to 100 KB uncompressed payload. func (c *Client) GetMetricWidgetImage(ctx context.Context, params *GetMetricWidgetImageInput, optFns ...func(*Options)) (*GetMetricWidgetImageOutput, error) { if params == nil { @@ -40,18 +45,25 @@ type GetMetricWidgetImageInput struct { // A JSON string that defines the bitmap graph to be retrieved. The string // includes the metrics to include in the graph, statistics, annotations, title, // axis limits, and so on. You can include only one MetricWidget parameter in each - // GetMetricWidgetImage call. For more information about the syntax of MetricWidget - // see GetMetricWidgetImage: Metric Widget Structure and Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/CloudWatch-Metric-Widget-Structure.html) - // . If any metric on the graph could not load all the requested data points, an + // GetMetricWidgetImage call. + // + // For more information about the syntax of MetricWidget see [GetMetricWidgetImage: Metric Widget Structure and Syntax]. + // + // If any metric on the graph could not load all the requested data points, an // orange triangle with an exclamation point appears next to the graph legend. // + // [GetMetricWidgetImage: Metric Widget Structure and Syntax]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/CloudWatch-Metric-Widget-Structure.html + // // This member is required. MetricWidget *string - // The format of the resulting image. Only PNG images are supported. The default - // is png . If you specify png , the API returns an HTTP response with the - // content-type set to text/xml . The image data is in a MetricWidgetImage field. - // For example: > + // The format of the resulting image. Only PNG images are supported. + // + // The default is png . If you specify png , the API returns an HTTP response with + // the content-type set to text/xml . The image data is in a MetricWidgetImage + // field. For example: + // + // > // // iVBORw0KGgoAAAANSUhEUgAAAlgAAAGQEAYAAAAip... // @@ -133,6 +145,12 @@ func (c *Client) addOperationGetMetricWidgetImageMiddlewares(stack *middleware.S if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpGetMetricWidgetImageValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_ListDashboards.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_ListDashboards.go index 89679e6eca5..07a7da914e4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_ListDashboards.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_ListDashboards.go @@ -13,10 +13,11 @@ import ( // Returns a list of the dashboards for your account. If you include // DashboardNamePrefix , only those dashboards with names starting with the prefix -// are listed. 
Otherwise, all dashboards in your account are listed. ListDashboards -// returns up to 1000 results on one page. If there are more than 1000 dashboards, -// you can call ListDashboards again and include the value you received for -// NextToken in the first call, to receive the next 1000 results. +// are listed. Otherwise, all dashboards in your account are listed. +// +// ListDashboards returns up to 1000 results on one page. If there are more than +// 1000 dashboards, you can call ListDashboards again and include the value you +// received for NextToken in the first call, to receive the next 1000 results. func (c *Client) ListDashboards(ctx context.Context, params *ListDashboardsInput, optFns ...func(*Options)) (*ListDashboardsOutput, error) { if params == nil { params = &ListDashboardsInput{} @@ -115,6 +116,12 @@ func (c *Client) addOperationListDashboardsMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListDashboards(options.Region), middleware.Before); err != nil { return err } @@ -136,14 +143,6 @@ func (c *Client) addOperationListDashboardsMiddlewares(stack *middleware.Stack, return nil } -// ListDashboardsAPIClient is a client that implements the ListDashboards -// operation. -type ListDashboardsAPIClient interface { - ListDashboards(context.Context, *ListDashboardsInput, ...func(*Options)) (*ListDashboardsOutput, error) -} - -var _ ListDashboardsAPIClient = (*Client)(nil) - // ListDashboardsPaginatorOptions is the paginator options for ListDashboards type ListDashboardsPaginatorOptions struct { // Set to true if pagination should stop if the service returns a pagination token @@ -195,6 +194,9 @@ func (p *ListDashboardsPaginator) NextPage(ctx context.Context, optFns ...func(* params := *p.params params.NextToken = p.nextToken + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListDashboards(ctx, ¶ms, optFns...) if err != nil { return nil, err @@ -214,6 +216,14 @@ func (p *ListDashboardsPaginator) NextPage(ctx context.Context, optFns ...func(* return result, nil } +// ListDashboardsAPIClient is a client that implements the ListDashboards +// operation. +type ListDashboardsAPIClient interface { + ListDashboards(context.Context, *ListDashboardsInput, ...func(*Options)) (*ListDashboardsOutput, error) +} + +var _ ListDashboardsAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListDashboards(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_ListManagedInsightRules.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_ListManagedInsightRules.go index 7c659a0448e..04d9bf35970 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_ListManagedInsightRules.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_ListManagedInsightRules.go @@ -11,7 +11,8 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns a list that contains the number of managed Contributor Insights rules +// Returns a list that contains the number of managed Contributor Insights rules +// // in your account. 
func (c *Client) ListManagedInsightRules(ctx context.Context, params *ListManagedInsightRulesInput, optFns ...func(*Options)) (*ListManagedInsightRulesOutput, error) { if params == nil { @@ -30,17 +31,17 @@ func (c *Client) ListManagedInsightRules(ctx context.Context, params *ListManage type ListManagedInsightRulesInput struct { - // The ARN of an Amazon Web Services resource that has managed Contributor + // The ARN of an Amazon Web Services resource that has managed Contributor // Insights rules. // // This member is required. ResourceARN *string - // The maximum number of results to return in one operation. If you omit this + // The maximum number of results to return in one operation. If you omit this // parameter, the default number is used. The default number is 100 . MaxResults *int32 - // Include this value to get the next set of rules if the value was returned by + // Include this value to get the next set of rules if the value was returned by // the previous operation. NextToken *string @@ -49,11 +50,11 @@ type ListManagedInsightRulesInput struct { type ListManagedInsightRulesOutput struct { - // The managed rules that are available for the specified Amazon Web Services + // The managed rules that are available for the specified Amazon Web Services // resource. ManagedRules []types.ManagedRuleDescription - // Include this value to get the next set of rules if the value was returned by + // Include this value to get the next set of rules if the value was returned by // the previous operation. NextToken *string @@ -118,6 +119,12 @@ func (c *Client) addOperationListManagedInsightRulesMiddlewares(stack *middlewar if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpListManagedInsightRulesValidationMiddleware(stack); err != nil { return err } @@ -142,18 +149,10 @@ func (c *Client) addOperationListManagedInsightRulesMiddlewares(stack *middlewar return nil } -// ListManagedInsightRulesAPIClient is a client that implements the -// ListManagedInsightRules operation. -type ListManagedInsightRulesAPIClient interface { - ListManagedInsightRules(context.Context, *ListManagedInsightRulesInput, ...func(*Options)) (*ListManagedInsightRulesOutput, error) -} - -var _ ListManagedInsightRulesAPIClient = (*Client)(nil) - // ListManagedInsightRulesPaginatorOptions is the paginator options for // ListManagedInsightRules type ListManagedInsightRulesPaginatorOptions struct { - // The maximum number of results to return in one operation. If you omit this + // The maximum number of results to return in one operation. If you omit this // parameter, the default number is used. The default number is 100 . Limit int32 @@ -216,6 +215,9 @@ func (p *ListManagedInsightRulesPaginator) NextPage(ctx context.Context, optFns } params.MaxResults = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListManagedInsightRules(ctx, ¶ms, optFns...) if err != nil { return nil, err @@ -235,6 +237,14 @@ func (p *ListManagedInsightRulesPaginator) NextPage(ctx context.Context, optFns return result, nil } +// ListManagedInsightRulesAPIClient is a client that implements the +// ListManagedInsightRules operation. 
+type ListManagedInsightRulesAPIClient interface { + ListManagedInsightRules(context.Context, *ListManagedInsightRulesInput, ...func(*Options)) (*ListManagedInsightRulesOutput, error) +} + +var _ ListManagedInsightRulesAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListManagedInsightRules(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_ListMetricStreams.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_ListMetricStreams.go index d53085f7ebc..d785905fe72 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_ListMetricStreams.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_ListMetricStreams.go @@ -109,6 +109,12 @@ func (c *Client) addOperationListMetricStreamsMiddlewares(stack *middleware.Stac if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListMetricStreams(options.Region), middleware.Before); err != nil { return err } @@ -130,14 +136,6 @@ func (c *Client) addOperationListMetricStreamsMiddlewares(stack *middleware.Stac return nil } -// ListMetricStreamsAPIClient is a client that implements the ListMetricStreams -// operation. -type ListMetricStreamsAPIClient interface { - ListMetricStreams(context.Context, *ListMetricStreamsInput, ...func(*Options)) (*ListMetricStreamsOutput, error) -} - -var _ ListMetricStreamsAPIClient = (*Client)(nil) - // ListMetricStreamsPaginatorOptions is the paginator options for ListMetricStreams type ListMetricStreamsPaginatorOptions struct { // The maximum number of results to return in one operation. @@ -201,6 +199,9 @@ func (p *ListMetricStreamsPaginator) NextPage(ctx context.Context, optFns ...fun } params.MaxResults = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListMetricStreams(ctx, ¶ms, optFns...) if err != nil { return nil, err @@ -220,6 +221,14 @@ func (p *ListMetricStreamsPaginator) NextPage(ctx context.Context, optFns ...fun return result, nil } +// ListMetricStreamsAPIClient is a client that implements the ListMetricStreams +// operation. +type ListMetricStreamsAPIClient interface { + ListMetricStreams(context.Context, *ListMetricStreamsInput, ...func(*Options)) (*ListMetricStreamsOutput, error) +} + +var _ ListMetricStreamsAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListMetricStreams(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_ListMetrics.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_ListMetrics.go index d4e4febbae1..a7a0ee12ea8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_ListMetrics.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_ListMetrics.go @@ -11,21 +11,25 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// List the specified metrics. 
You can use the returned metrics with GetMetricData (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricData.html) -// or GetMetricStatistics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricStatistics.html) -// to get statistical data. Up to 500 results are returned for any one call. To -// retrieve additional results, use the returned token with subsequent calls. After -// you create a metric, allow up to 15 minutes for the metric to appear. To see -// metric statistics sooner, use GetMetricData (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricData.html) -// or GetMetricStatistics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricStatistics.html) -// . If you are using CloudWatch cross-account observability, you can use this +// List the specified metrics. You can use the returned metrics with [GetMetricData] or [GetMetricStatistics] to get +// statistical data. +// +// Up to 500 results are returned for any one call. To retrieve additional +// results, use the returned token with subsequent calls. +// +// After you create a metric, allow up to 15 minutes for the metric to appear. To +// see metric statistics sooner, use [GetMetricData]or [GetMetricStatistics]. +// +// If you are using CloudWatch cross-account observability, you can use this // operation in a monitoring account and view metrics from the linked source -// accounts. For more information, see CloudWatch cross-account observability (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html) -// . ListMetrics doesn't return information about metrics if those metrics haven't -// reported data in the past two weeks. To retrieve those metrics, use -// GetMetricData (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricData.html) -// or GetMetricStatistics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricStatistics.html) -// . +// accounts. For more information, see [CloudWatch cross-account observability]. +// +// ListMetrics doesn't return information about metrics if those metrics haven't +// reported data in the past two weeks. To retrieve those metrics, use [GetMetricData]or [GetMetricStatistics]. +// +// [GetMetricData]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricData.html +// [GetMetricStatistics]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricStatistics.html +// [CloudWatch cross-account observability]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html func (c *Client) ListMetrics(ctx context.Context, params *ListMetricsInput, optFns ...func(*Options)) (*ListMetricsOutput, error) { if params == nil { params = &ListMetricsInput{} @@ -48,7 +52,9 @@ type ListMetricsInput struct { Dimensions []types.DimensionFilter // If you are using this operation in a monitoring account, specify true to - // include metrics from source accounts in the returned data. The default is false . + // include metrics from source accounts in the returned data. + // + // The default is false . IncludeLinkedAccounts *bool // The name of the metric to filter against. Only the metrics with names that @@ -70,10 +76,11 @@ type ListMetricsInput struct { // To filter the results to show only metrics that have had data points published // in the past three hours, specify this parameter with a value of PT3H . 
This is - // the only valid value for this parameter. The results that are returned are an - // approximation of the value you specify. There is a low probability that the - // returned results include metrics with last published data as much as 40 minutes - // more than the specified time interval. + // the only valid value for this parameter. + // + // The results that are returned are an approximation of the value you specify. + // There is a low probability that the returned results include metrics with last + // published data as much as 40 minutes more than the specified time interval. RecentlyActive types.RecentlyActive noSmithyDocumentSerde @@ -89,8 +96,10 @@ type ListMetricsOutput struct { // If you are using this operation in a monitoring account, this array contains // the account IDs of the source accounts where the metrics in the returned data - // are from. This field is a 1:1 mapping between each metric that is returned and - // the ID of the owning account. + // are from. + // + // This field is a 1:1 mapping between each metric that is returned and the ID of + // the owning account. OwningAccounts []string // Metadata pertaining to the operation's result. @@ -154,6 +163,12 @@ func (c *Client) addOperationListMetricsMiddlewares(stack *middleware.Stack, opt if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpListMetricsValidationMiddleware(stack); err != nil { return err } @@ -178,13 +193,6 @@ func (c *Client) addOperationListMetricsMiddlewares(stack *middleware.Stack, opt return nil } -// ListMetricsAPIClient is a client that implements the ListMetrics operation. -type ListMetricsAPIClient interface { - ListMetrics(context.Context, *ListMetricsInput, ...func(*Options)) (*ListMetricsOutput, error) -} - -var _ ListMetricsAPIClient = (*Client)(nil) - // ListMetricsPaginatorOptions is the paginator options for ListMetrics type ListMetricsPaginatorOptions struct { // Set to true if pagination should stop if the service returns a pagination token @@ -236,6 +244,9 @@ func (p *ListMetricsPaginator) NextPage(ctx context.Context, optFns ...func(*Opt params := *p.params params.NextToken = p.nextToken + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListMetrics(ctx, ¶ms, optFns...) if err != nil { return nil, err @@ -255,6 +266,13 @@ func (p *ListMetricsPaginator) NextPage(ctx context.Context, optFns ...func(*Opt return result, nil } +// ListMetricsAPIClient is a client that implements the ListMetrics operation. 
+type ListMetricsAPIClient interface { + ListMetrics(context.Context, *ListMetricsInput, ...func(*Options)) (*ListMetricsOutput, error) +} + +var _ ListMetricsAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListMetrics(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_ListTagsForResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_ListTagsForResource.go index a675576a1bc..b5e2d9ecbc3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_ListTagsForResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_ListTagsForResource.go @@ -30,12 +30,18 @@ func (c *Client) ListTagsForResource(ctx context.Context, params *ListTagsForRes type ListTagsForResourceInput struct { - // The ARN of the CloudWatch resource that you want to view tags for. The ARN - // format of an alarm is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name + // The ARN of the CloudWatch resource that you want to view tags for. + // + // The ARN format of an alarm is + // arn:aws:cloudwatch:Region:account-id:alarm:alarm-name + // // The ARN format of a Contributor Insights rule is - // arn:aws:cloudwatch:Region:account-id:insight-rule/insight-rule-name For more - // information about ARN format, see Resource Types Defined by Amazon CloudWatch (https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazoncloudwatch.html#amazoncloudwatch-resources-for-iam-policies) - // in the Amazon Web Services General Reference. + // arn:aws:cloudwatch:Region:account-id:insight-rule/insight-rule-name + // + // For more information about ARN format, see [Resource Types Defined by Amazon CloudWatch] in the Amazon Web Services General + // Reference. + // + // [Resource Types Defined by Amazon CloudWatch]: https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazoncloudwatch.html#amazoncloudwatch-resources-for-iam-policies // // This member is required. ResourceARN *string @@ -109,6 +115,12 @@ func (c *Client) addOperationListTagsForResourceMiddlewares(stack *middleware.St if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpListTagsForResourceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_PutAnomalyDetector.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_PutAnomalyDetector.go index 4afd4b531b9..c04272705e5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_PutAnomalyDetector.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_PutAnomalyDetector.go @@ -12,13 +12,16 @@ import ( ) // Creates an anomaly detection model for a CloudWatch metric. You can use the -// model to display a band of expected normal values when the metric is graphed. If -// you have enabled unified cross-account observability, and this account is a +// model to display a band of expected normal values when the metric is graphed. +// +// If you have enabled unified cross-account observability, and this account is a // monitoring account, the metric can be in the same account or a source account. // You can specify the account ID in the object you specify in the -// SingleMetricAnomalyDetector parameter. 
For more information, see CloudWatch -// Anomaly Detection (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Anomaly_Detection.html) -// . +// SingleMetricAnomalyDetector parameter. +// +// For more information, see [CloudWatch Anomaly Detection]. +// +// [CloudWatch Anomaly Detection]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Anomaly_Detection.html func (c *Client) PutAnomalyDetector(ctx context.Context, params *PutAnomalyDetectorInput, optFns ...func(*Options)) (*PutAnomalyDetectorOutput, error) { if params == nil { params = &PutAnomalyDetectorInput{} @@ -38,8 +41,9 @@ type PutAnomalyDetectorInput struct { // The configuration specifies details about how the anomaly detection model is to // be trained, including time ranges to exclude when training and updating the - // model. You can specify as many as 10 time ranges. The configuration can also - // include the time zone to use for the metric. + // model. You can specify as many as 10 time ranges. + // + // The configuration can also include the time zone to use for the metric. Configuration *types.AnomalyDetectorConfiguration // The metric dimensions to create the anomaly detection model for. @@ -52,14 +56,21 @@ type PutAnomalyDetectorInput struct { // Currently, it includes the PeriodicSpikes parameter. MetricCharacteristics *types.MetricCharacteristics - // The metric math anomaly detector to be created. When using - // MetricMathAnomalyDetector , you cannot include the following parameters in the - // same operation: + // The metric math anomaly detector to be created. + // + // When using MetricMathAnomalyDetector , you cannot include the following + // parameters in the same operation: + // // - Dimensions + // // - MetricName + // // - Namespace + // // - Stat + // // - the SingleMetricAnomalyDetector parameters of PutAnomalyDetectorInput + // // Instead, specify the metric math anomaly detector attributes as part of the // property MetricMathAnomalyDetector . MetricMathAnomalyDetector *types.MetricMathAnomalyDetector @@ -74,14 +85,21 @@ type PutAnomalyDetectorInput struct { // Deprecated: Use SingleMetricAnomalyDetector. Namespace *string - // A single metric anomaly detector to be created. When using - // SingleMetricAnomalyDetector , you cannot include the following parameters in the - // same operation: + // A single metric anomaly detector to be created. + // + // When using SingleMetricAnomalyDetector , you cannot include the following + // parameters in the same operation: + // // - Dimensions + // // - MetricName + // // - Namespace + // // - Stat + // // - the MetricMathAnomalyDetector parameters of PutAnomalyDetectorInput + // // Instead, specify the single metric anomaly detector attributes as part of the // property SingleMetricAnomalyDetector . 
SingleMetricAnomalyDetector *types.SingleMetricAnomalyDetector @@ -156,6 +174,12 @@ func (c *Client) addOperationPutAnomalyDetectorMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpPutAnomalyDetectorValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_PutCompositeAlarm.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_PutCompositeAlarm.go index 8d97adaeece..4ca6143cb58 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_PutCompositeAlarm.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_PutCompositeAlarm.go @@ -14,42 +14,59 @@ import ( // Creates or updates a composite alarm. When you create a composite alarm, you // specify a rule expression for the alarm that takes into account the alarm states // of other alarms that you have created. The composite alarm goes into ALARM state -// only if all conditions of the rule are met. The alarms specified in a composite -// alarm's rule expression can include metric alarms and other composite alarms. -// The rule expression of a composite alarm can include as many as 100 underlying -// alarms. Any single alarm can be included in the rule expressions of as many as -// 150 composite alarms. Using composite alarms can reduce alarm noise. You can -// create multiple metric alarms, and also create a composite alarm and set up -// alerts only for the composite alarm. For example, you could create a composite -// alarm that goes into ALARM state only when more than one of the underlying -// metric alarms are in ALARM state. Composite alarms can take the following -// actions: +// only if all conditions of the rule are met. +// +// The alarms specified in a composite alarm's rule expression can include metric +// alarms and other composite alarms. The rule expression of a composite alarm can +// include as many as 100 underlying alarms. Any single alarm can be included in +// the rule expressions of as many as 150 composite alarms. +// +// Using composite alarms can reduce alarm noise. You can create multiple metric +// alarms, and also create a composite alarm and set up alerts only for the +// composite alarm. For example, you could create a composite alarm that goes into +// ALARM state only when more than one of the underlying metric alarms are in ALARM +// state. +// +// Composite alarms can take the following actions: +// // - Notify Amazon SNS topics. +// // - Invoke Lambda functions. +// // - Create OpsItems in Systems Manager Ops Center. +// // - Create incidents in Systems Manager Incident Manager. // // It is possible to create a loop or cycle of composite alarms, where composite // alarm A depends on composite alarm B, and composite alarm B also depends on // composite alarm A. In this scenario, you can't delete any composite alarm that // is part of the cycle because there is always still a composite alarm that -// depends on that alarm that you want to delete. To get out of such a situation, -// you must break the cycle by changing the rule of one of the composite alarms in -// the cycle to remove a dependency that creates the cycle. The simplest change to -// make to break a cycle is to change the AlarmRule of one of the alarms to false . 
+// depends on that alarm that you want to delete. +// +// To get out of such a situation, you must break the cycle by changing the rule +// of one of the composite alarms in the cycle to remove a dependency that creates +// the cycle. The simplest change to make to break a cycle is to change the +// AlarmRule of one of the alarms to false . +// // Additionally, the evaluation of composite alarms stops if CloudWatch detects a -// cycle in the evaluation path. When this operation creates an alarm, the alarm -// state is immediately set to INSUFFICIENT_DATA . The alarm is then evaluated and -// its state is set appropriately. Any actions associated with the new state are -// then executed. For a composite alarm, this initial time after creation is the -// only time that the alarm can be in INSUFFICIENT_DATA state. When you update an -// existing alarm, its state is left unchanged, but the update completely -// overwrites the previous configuration of the alarm. To use this operation, you -// must be signed on with the cloudwatch:PutCompositeAlarm permission that is -// scoped to * . You can't create a composite alarms if your -// cloudwatch:PutCompositeAlarm permission has a narrower scope. If you are an IAM -// user, you must have iam:CreateServiceLinkedRole to create a composite alarm -// that has Systems Manager OpsItem actions. +// cycle in the evaluation path. +// +// When this operation creates an alarm, the alarm state is immediately set to +// INSUFFICIENT_DATA . The alarm is then evaluated and its state is set +// appropriately. Any actions associated with the new state are then executed. For +// a composite alarm, this initial time after creation is the only time that the +// alarm can be in INSUFFICIENT_DATA state. +// +// When you update an existing alarm, its state is left unchanged, but the update +// completely overwrites the previous configuration of the alarm. +// +// To use this operation, you must be signed on with the +// cloudwatch:PutCompositeAlarm permission that is scoped to * . You can't create a +// composite alarms if your cloudwatch:PutCompositeAlarm permission has a narrower +// scope. +// +// If you are an IAM user, you must have iam:CreateServiceLinkedRole to create a +// composite alarm that has Systems Manager OpsItem actions. func (c *Client) PutCompositeAlarm(ctx context.Context, params *PutCompositeAlarmInput, optFns ...func(*Options)) (*PutCompositeAlarmOutput, error) { if params == nil { params = &PutCompositeAlarmInput{} @@ -77,33 +94,49 @@ type PutCompositeAlarmInput struct { // designate a function that specifies whether that alarm needs to be in ALARM // state, OK state, or INSUFFICIENT_DATA state. You can use operators (AND, OR and // NOT) to combine multiple functions in a single expression. You can use - // parenthesis to logically group the functions in your expression. You can use - // either alarm names or ARNs to reference the other alarms that are to be - // evaluated. Functions can include the following: + // parenthesis to logically group the functions in your expression. + // + // You can use either alarm names or ARNs to reference the other alarms that are + // to be evaluated. + // + // Functions can include the following: + // // - ALARM("alarm-name or alarm-ARN") is TRUE if the named alarm is in ALARM // state. + // // - OK("alarm-name or alarm-ARN") is TRUE if the named alarm is in OK state. + // // - INSUFFICIENT_DATA("alarm-name or alarm-ARN") is TRUE if the named alarm is // in INSUFFICIENT_DATA state. 
+ // // - TRUE always evaluates to TRUE. + // // - FALSE always evaluates to FALSE. + // // TRUE and FALSE are useful for testing a complex AlarmRule structure, and for - // testing your alarm actions. Alarm names specified in AlarmRule can be - // surrounded with double-quotes ("), but do not have to be. The following are some - // examples of AlarmRule : + // testing your alarm actions. + // + // Alarm names specified in AlarmRule can be surrounded with double-quotes ("), + // but do not have to be. + // + // The following are some examples of AlarmRule : + // // - ALARM(CPUUtilizationTooHigh) AND ALARM(DiskReadOpsTooHigh) specifies that // the composite alarm goes into ALARM state only if both CPUUtilizationTooHigh and // DiskReadOpsTooHigh alarms are in ALARM state. + // // - ALARM(CPUUtilizationTooHigh) AND NOT ALARM(DeploymentInProgress) specifies // that the alarm goes to ALARM state if CPUUtilizationTooHigh is in ALARM state // and DeploymentInProgress is not in ALARM state. This example reduces alarm noise // during a known deployment window. + // // - (ALARM(CPUUtilizationTooHigh) OR ALARM(DiskReadOpsTooHigh)) AND // OK(NetworkOutTooHigh) goes into ALARM state if CPUUtilizationTooHigh OR // DiskReadOpsTooHigh is in ALARM state, and if NetworkOutTooHigh is in OK state. // This provides another example of using a composite alarm to prevent noise. This // rule ensures that you are not notified with an alarm action on high CPU or disk // usage if a known network problem is also occurring. + // // The AlarmRule can specify as many as 100 "children" alarms. The AlarmRule // expression can have as many as 500 elements. Elements are child alarms, TRUE or // FALSE statements, and parentheses. @@ -115,33 +148,48 @@ type PutCompositeAlarmInput struct { // state of the composite alarm. The default is TRUE . ActionsEnabled *bool - // Actions will be suppressed if the suppressor alarm is in the ALARM state. + // Actions will be suppressed if the suppressor alarm is in the ALARM state. // ActionsSuppressor can be an AlarmName or an Amazon Resource Name (ARN) from an // existing alarm. ActionsSuppressor *string - // The maximum time in seconds that the composite alarm waits after suppressor + // The maximum time in seconds that the composite alarm waits after suppressor // alarm goes out of the ALARM state. After this time, the composite alarm - // performs its actions. ExtensionPeriod is required only when ActionsSuppressor - // is specified. + // performs its actions. + // + // ExtensionPeriod is required only when ActionsSuppressor is specified. ActionsSuppressorExtensionPeriod *int32 - // The maximum time in seconds that the composite alarm waits for the suppressor + // The maximum time in seconds that the composite alarm waits for the suppressor // alarm to go into the ALARM state. After this time, the composite alarm performs - // its actions. WaitPeriod is required only when ActionsSuppressor is specified. + // its actions. + // + // WaitPeriod is required only when ActionsSuppressor is specified. ActionsSuppressorWaitPeriod *int32 // The actions to execute when this alarm transitions to the ALARM state from any - // other state. Each action is specified as an Amazon Resource Name (ARN). Valid - // Values: ] Amazon SNS actions: arn:aws:sns:region:account-id:sns-topic-name + // other state. Each action is specified as an Amazon Resource Name (ARN). 
+ // + // Valid Values: ] + // + // Amazon SNS actions: + // + // arn:aws:sns:region:account-id:sns-topic-name + // // Lambda actions: + // // - Invoke the latest version of a Lambda function: // arn:aws:lambda:region:account-id:function:function-name + // // - Invoke a specific version of a Lambda function: // arn:aws:lambda:region:account-id:function:function-name:version-number + // // - Invoke a function by using an alias Lambda function: // arn:aws:lambda:region:account-id:function:function-name:alias-name - // Systems Manager actions: arn:aws:ssm:region:account-id:opsitem:severity + // + // Systems Manager actions: + // + // arn:aws:ssm:region:account-id:opsitem:severity AlarmActions []string // The description for the composite alarm. @@ -149,38 +197,61 @@ type PutCompositeAlarmInput struct { // The actions to execute when this alarm transitions to the INSUFFICIENT_DATA // state from any other state. Each action is specified as an Amazon Resource Name - // (ARN). Valid Values: ] Amazon SNS actions: - // arn:aws:sns:region:account-id:sns-topic-name Lambda actions: + // (ARN). + // + // Valid Values: ] + // + // Amazon SNS actions: + // + // arn:aws:sns:region:account-id:sns-topic-name + // + // Lambda actions: + // // - Invoke the latest version of a Lambda function: // arn:aws:lambda:region:account-id:function:function-name + // // - Invoke a specific version of a Lambda function: // arn:aws:lambda:region:account-id:function:function-name:version-number + // // - Invoke a function by using an alias Lambda function: // arn:aws:lambda:region:account-id:function:function-name:alias-name InsufficientDataActions []string // The actions to execute when this alarm transitions to an OK state from any - // other state. Each action is specified as an Amazon Resource Name (ARN). Valid - // Values: ] Amazon SNS actions: arn:aws:sns:region:account-id:sns-topic-name + // other state. Each action is specified as an Amazon Resource Name (ARN). + // + // Valid Values: ] + // + // Amazon SNS actions: + // + // arn:aws:sns:region:account-id:sns-topic-name + // // Lambda actions: + // // - Invoke the latest version of a Lambda function: // arn:aws:lambda:region:account-id:function:function-name + // // - Invoke a specific version of a Lambda function: // arn:aws:lambda:region:account-id:function:function-name:version-number + // // - Invoke a function by using an alias Lambda function: // arn:aws:lambda:region:account-id:function:function-name:alias-name OKActions []string // A list of key-value pairs to associate with the alarm. You can associate as // many as 50 tags with an alarm. To be able to associate tags with the alarm when - // you create the alarm, you must have the cloudwatch:TagResource permission. Tags - // can help you organize and categorize your resources. You can also use them to - // scope user permissions by granting a user permission to access or change only - // resources with certain tag values. If you are using this operation to update an - // existing alarm, any tags you specify in this parameter are ignored. To change - // the tags of an existing alarm, use TagResource (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_TagResource.html) - // or UntagResource (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_UntagResource.html) - // . + // you create the alarm, you must have the cloudwatch:TagResource permission. + // + // Tags can help you organize and categorize your resources. 
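A corresponding sketch for the PutCompositeAlarm operation documented in this hunk, reusing the client from the earlier sketch; the alarm names and the SNS topic ARN are placeholders, and the two child alarms must already exist:

func putCompositeAlarm(ctx context.Context, cw *cloudwatch.Client) error {
    // Combine two existing metric alarms with an AlarmRule expression, as
    // described above; ALARM(...) is TRUE while the named alarm is in ALARM state.
    _, err := cw.PutCompositeAlarm(ctx, &cloudwatch.PutCompositeAlarmInput{
        AlarmName: aws.String("ServiceDegraded"), // placeholder composite alarm name
        AlarmRule: aws.String(`ALARM("CPUUtilizationTooHigh") AND ALARM("DiskReadOpsTooHigh")`),
        // Notify an SNS topic when the composite alarm enters ALARM state.
        AlarmActions: []string{"arn:aws:sns:us-east-1:111122223333:alerts"}, // placeholder ARN
    })
    return err
}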
You can also use them + // to scope user permissions by granting a user permission to access or change only + // resources with certain tag values. + // + // If you are using this operation to update an existing alarm, any tags you + // specify in this parameter are ignored. To change the tags of an existing alarm, + // use [TagResource]or [UntagResource]. + // + // [TagResource]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_TagResource.html + // [UntagResource]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_UntagResource.html Tags []types.Tag noSmithyDocumentSerde @@ -248,6 +319,12 @@ func (c *Client) addOperationPutCompositeAlarmMiddlewares(stack *middleware.Stac if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpPutCompositeAlarmValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_PutDashboard.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_PutDashboard.go index dc0ad9620e8..b3858a2287a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_PutDashboard.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_PutDashboard.go @@ -13,13 +13,17 @@ import ( // Creates a dashboard if it does not already exist, or updates an existing // dashboard. If you update a dashboard, the entire contents are replaced with what -// you specify here. All dashboards in your account are global, not -// region-specific. A simple way to create a dashboard using PutDashboard is to -// copy an existing dashboard. To copy an existing dashboard using the console, you -// can load the dashboard and then use the View/edit source command in the Actions -// menu to display the JSON block for that dashboard. Another way to copy a -// dashboard is to use GetDashboard , and then use the data returned within -// DashboardBody as the template for the new dashboard when you call PutDashboard . +// you specify here. +// +// All dashboards in your account are global, not region-specific. +// +// A simple way to create a dashboard using PutDashboard is to copy an existing +// dashboard. To copy an existing dashboard using the console, you can load the +// dashboard and then use the View/edit source command in the Actions menu to +// display the JSON block for that dashboard. Another way to copy a dashboard is to +// use GetDashboard , and then use the data returned within DashboardBody as the +// template for the new dashboard when you call PutDashboard . +// // When you create a dashboard with PutDashboard , a good practice is to add a text // widget at the top of the dashboard with a message that the dashboard was created // by script and should not be changed in the console. This message could also @@ -44,9 +48,11 @@ type PutDashboardInput struct { // The detailed information about the dashboard in JSON format, including the // widgets to include and their location on the dashboard. This parameter is - // required. For more information about the syntax, see Dashboard Body Structure - // and Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/CloudWatch-Dashboard-Body-Structure.html) - // . + // required. + // + // For more information about the syntax, see [Dashboard Body Structure and Syntax]. 
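The PutDashboard operation shown above can be exercised in the same way; a rough sketch with a placeholder dashboard name and a single illustrative text widget as the DashboardBody (client and imports as in the first sketch):

func putDashboard(ctx context.Context, cw *cloudwatch.Client) error {
    // DashboardBody is the JSON described in "Dashboard Body Structure and
    // Syntax"; this one-widget body is only illustrative.
    body := `{"widgets":[{"type":"text","x":0,"y":0,"width":6,"height":3,` +
        `"properties":{"markdown":"Created by script; do not edit in the console."}}]}`
    out, err := cw.PutDashboard(ctx, &cloudwatch.PutDashboardInput{
        DashboardName: aws.String("service-overview"), // placeholder name
        DashboardBody: aws.String(body),
    })
    if err != nil {
        return err
    }
    // Warning messages mean the dashboard was stored but may not render fully.
    for _, m := range out.DashboardValidationMessages {
        log.Printf("dashboard validation: %s", aws.ToString(m.Message))
    }
    return nil
}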
+ // + // [Dashboard Body Structure and Syntax]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/CloudWatch-Dashboard-Body-Structure.html // // This member is required. DashboardBody *string @@ -65,10 +71,14 @@ type PutDashboardInput struct { type PutDashboardOutput struct { // If the input for PutDashboard was correct and the dashboard was successfully - // created or modified, this result is empty. If this result includes only warning - // messages, then the input was valid enough for the dashboard to be created or - // modified, but some elements of the dashboard might not render. If this result - // includes error messages, the input was not valid and the operation failed. + // created or modified, this result is empty. + // + // If this result includes only warning messages, then the input was valid enough + // for the dashboard to be created or modified, but some elements of the dashboard + // might not render. + // + // If this result includes error messages, the input was not valid and the + // operation failed. DashboardValidationMessages []types.DashboardValidationMessage // Metadata pertaining to the operation's result. @@ -132,6 +142,12 @@ func (c *Client) addOperationPutDashboardMiddlewares(stack *middleware.Stack, op if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpPutDashboardValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_PutInsightRule.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_PutInsightRule.go index 2f585c67b60..d45d9077d4e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_PutInsightRule.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_PutInsightRule.go @@ -13,10 +13,12 @@ import ( // Creates a Contributor Insights rule. Rules evaluate log events in a CloudWatch // Logs log group, enabling you to find contributor data for the log events in that -// log group. For more information, see Using Contributor Insights to Analyze -// High-Cardinality Data (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContributorInsights.html) -// . If you create a rule, delete it, and then re-create it with the same name, +// log group. For more information, see [Using Contributor Insights to Analyze High-Cardinality Data]. +// +// If you create a rule, delete it, and then re-create it with the same name, // historical data from the first time the rule was created might not be available. +// +// [Using Contributor Insights to Analyze High-Cardinality Data]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContributorInsights.html func (c *Client) PutInsightRule(ctx context.Context, params *PutInsightRuleInput, optFns ...func(*Options)) (*PutInsightRuleOutput, error) { if params == nil { params = &PutInsightRuleInput{} @@ -35,8 +37,9 @@ func (c *Client) PutInsightRule(ctx context.Context, params *PutInsightRuleInput type PutInsightRuleInput struct { // The definition of the rule, as a JSON object. For details on the valid syntax, - // see Contributor Insights Rule Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContributorInsights-RuleSyntax.html) - // . + // see [Contributor Insights Rule Syntax]. 
+ // + // [Contributor Insights Rule Syntax]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContributorInsights-RuleSyntax.html // // This member is required. RuleDefinition *string @@ -50,15 +53,21 @@ type PutInsightRuleInput struct { RuleState *string // A list of key-value pairs to associate with the Contributor Insights rule. You - // can associate as many as 50 tags with a rule. Tags can help you organize and - // categorize your resources. You can also use them to scope user permissions, by - // granting a user permission to access or change only the resources that have - // certain tag values. To be able to associate tags with a rule, you must have the + // can associate as many as 50 tags with a rule. + // + // Tags can help you organize and categorize your resources. You can also use them + // to scope user permissions, by granting a user permission to access or change + // only the resources that have certain tag values. + // + // To be able to associate tags with a rule, you must have the // cloudwatch:TagResource permission in addition to the cloudwatch:PutInsightRule - // permission. If you are using this operation to update an existing Contributor - // Insights rule, any tags you specify in this parameter are ignored. To change the - // tags of an existing rule, use TagResource (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_TagResource.html) - // . + // permission. + // + // If you are using this operation to update an existing Contributor Insights + // rule, any tags you specify in this parameter are ignored. To change the tags of + // an existing rule, use [TagResource]. + // + // [TagResource]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_TagResource.html Tags []types.Tag noSmithyDocumentSerde @@ -126,6 +135,12 @@ func (c *Client) addOperationPutInsightRuleMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpPutInsightRuleValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_PutManagedInsightRules.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_PutManagedInsightRules.go index f664daf8ae3..ab815777a79 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_PutManagedInsightRules.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_PutManagedInsightRules.go @@ -11,14 +11,15 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates a managed Contributor Insights rule for a specified Amazon Web Services -// resource. When you enable a managed rule, you create a Contributor Insights rule -// that collects data from Amazon Web Services services. You cannot edit these -// rules with PutInsightRule . The rules can be enabled, disabled, and deleted -// using EnableInsightRules , DisableInsightRules , and DeleteInsightRules . If a -// previously created managed rule is currently disabled, a subsequent call to this -// API will re-enable it. Use ListManagedInsightRules to describe all available -// rules. +// Creates a managed Contributor Insights rule for a specified Amazon Web +// +// Services resource. When you enable a managed rule, you create a Contributor +// Insights rule that collects data from Amazon Web Services services. 
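For PutInsightRule, a hedged sketch that again reuses the earlier client; the rule name is a placeholder and the definition below only approximates the Contributor Insights rule syntax linked above, so treat it as illustrative rather than a tested rule:

func putInsightRule(ctx context.Context, cw *cloudwatch.Client) error {
    // The rule definition is a JSON document; the keys here follow the
    // documented log-rule shape but are assumptions for illustration only.
    definition := `{"Schema":{"Name":"CloudWatchLogRule","Version":1},` +
        `"LogGroupNames":["/aws/lambda/my-function"],"LogFormat":"JSON",` +
        `"Contribution":{"Keys":["$.requestId"],"Filters":[]},` +
        `"AggregateOn":"Count"}`
    _, err := cw.PutInsightRule(ctx, &cloudwatch.PutInsightRuleInput{
        RuleName:       aws.String("top-request-ids"), // placeholder rule name
        RuleState:      aws.String("ENABLED"),
        RuleDefinition: aws.String(definition),
    })
    return err
}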
You cannot +// edit these rules with PutInsightRule . The rules can be enabled, disabled, and +// deleted using EnableInsightRules , DisableInsightRules , and DeleteInsightRules +// . If a previously created managed rule is currently disabled, a subsequent call +// to this API will re-enable it. Use ListManagedInsightRules to describe all +// available rules. func (c *Client) PutManagedInsightRules(ctx context.Context, params *PutManagedInsightRulesInput, optFns ...func(*Options)) (*PutManagedInsightRulesOutput, error) { if params == nil { params = &PutManagedInsightRulesInput{} @@ -36,7 +37,7 @@ func (c *Client) PutManagedInsightRules(ctx context.Context, params *PutManagedI type PutManagedInsightRulesInput struct { - // A list of ManagedRules to enable. + // A list of ManagedRules to enable. // // This member is required. ManagedRules []types.ManagedRule @@ -46,7 +47,7 @@ type PutManagedInsightRulesInput struct { type PutManagedInsightRulesOutput struct { - // An array that lists the rules that could not be enabled. + // An array that lists the rules that could not be enabled. Failures []types.PartialFailure // Metadata pertaining to the operation's result. @@ -110,6 +111,12 @@ func (c *Client) addOperationPutManagedInsightRulesMiddlewares(stack *middleware if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpPutManagedInsightRulesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_PutMetricAlarm.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_PutMetricAlarm.go index c16d0f0463e..82db2a3f07c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_PutMetricAlarm.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_PutMetricAlarm.go @@ -13,16 +13,22 @@ import ( // Creates or updates an alarm and associates it with the specified metric, metric // math expression, anomaly detection model, or Metrics Insights query. For more -// information about using a Metrics Insights query for an alarm, see Create -// alarms on Metrics Insights queries (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Create_Metrics_Insights_Alarm.html) -// . Alarms based on anomaly detection models cannot have Auto Scaling actions. +// information about using a Metrics Insights query for an alarm, see [Create alarms on Metrics Insights queries]. +// +// Alarms based on anomaly detection models cannot have Auto Scaling actions. +// // When this operation creates an alarm, the alarm state is immediately set to // INSUFFICIENT_DATA . The alarm is then evaluated and its state is set -// appropriately. Any actions associated with the new state are then executed. When -// you update an existing alarm, its state is left unchanged, but the update -// completely overwrites the previous configuration of the alarm. If you are an IAM -// user, you must have Amazon EC2 permissions for some alarm operations: +// appropriately. Any actions associated with the new state are then executed. +// +// When you update an existing alarm, its state is left unchanged, but the update +// completely overwrites the previous configuration of the alarm. 
+// +// If you are an IAM user, you must have Amazon EC2 permissions for some alarm +// operations: +// // - The iam:CreateServiceLinkedRole permission for all alarms with EC2 actions +// // - The iam:CreateServiceLinkedRole permissions to create an alarm with Systems // Manager OpsItem or response plan actions. // @@ -30,24 +36,31 @@ import ( // Console, the CLI, or by using the PutMetricAlarm API, CloudWatch creates the // necessary service-linked role for you. The service-linked roles are called // AWSServiceRoleForCloudWatchEvents and -// AWSServiceRoleForCloudWatchAlarms_ActionSSM . For more information, see Amazon -// Web Services service-linked role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-service-linked-role) -// . Each PutMetricAlarm action has a maximum uncompressed payload of 120 KB. -// Cross-account alarms You can set an alarm on metrics in the current account, or -// in another account. To create a cross-account alarm that watches a metric in a -// different account, you must have completed the following pre-requisites: +// AWSServiceRoleForCloudWatchAlarms_ActionSSM . For more information, see [Amazon Web Services service-linked role]. +// +// Each PutMetricAlarm action has a maximum uncompressed payload of 120 KB. +// +// # Cross-account alarms +// +// You can set an alarm on metrics in the current account, or in another account. +// To create a cross-account alarm that watches a metric in a different account, +// you must have completed the following pre-requisites: +// // - The account where the metrics are located (the sharing account) must // already have a sharing role named CloudWatch-CrossAccountSharingRole. If it does // not already have this role, you must create it using the instructions in Set up -// a sharing account in Cross-account cross-Region CloudWatch console (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Cross-Account-Cross-Region.html#enable-cross-account-cross-Region) -// . The policy for that role must grant access to the ID of the account where you -// are creating the alarm. +// a sharing account in [Cross-account cross-Region CloudWatch console]. The policy for that role must grant access to the ID +// of the account where you are creating the alarm. +// // - The account where you are creating the alarm (the monitoring account) must // already have a service-linked role named AWSServiceRoleForCloudWatchCrossAccount // to allow CloudWatch to assume the sharing role in the sharing account. If it // does not, you must create it following the directions in Set up a monitoring -// account in Cross-account cross-Region CloudWatch console (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Cross-Account-Cross-Region.html#enable-cross-account-cross-Region) -// . +// account in [Cross-account cross-Region CloudWatch console]. 
+// +// [Amazon Web Services service-linked role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-service-linked-role +// [Create alarms on Metrics Insights queries]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Create_Metrics_Insights_Alarm.html +// [Cross-account cross-Region CloudWatch console]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Cross-Account-Cross-Region.html#enable-cross-account-cross-Region func (c *Client) PutMetricAlarm(ctx context.Context, params *PutMetricAlarmInput, optFns ...func(*Options)) (*PutMetricAlarmOutput, error) { if params == nil { params = &PutMetricAlarmInput{} @@ -65,17 +78,20 @@ func (c *Client) PutMetricAlarm(ctx context.Context, params *PutMetricAlarmInput type PutMetricAlarmInput struct { - // The name for the alarm. This name must be unique within the Region. The name - // must contain only UTF-8 characters, and can't contain ASCII control characters + // The name for the alarm. This name must be unique within the Region. + // + // The name must contain only UTF-8 characters, and can't contain ASCII control + // characters // // This member is required. AlarmName *string - // The arithmetic operation to use when comparing the specified statistic and - // threshold. The specified statistic value is used as the first operand. The - // values LessThanLowerOrGreaterThanUpperThreshold , LessThanLowerThreshold , and - // GreaterThanUpperThreshold are used only for alarms based on anomaly detection - // models. + // The arithmetic operation to use when comparing the specified statistic and + // threshold. The specified statistic value is used as the first operand. + // + // The values LessThanLowerOrGreaterThanUpperThreshold , LessThanLowerThreshold , + // and GreaterThanUpperThreshold are used only for alarms based on anomaly + // detection models. // // This member is required. ComparisonOperator types.ComparisonOperator @@ -83,9 +99,10 @@ type PutMetricAlarmInput struct { // The number of periods over which data is compared to the specified threshold. // If you are setting an alarm that requires that a number of consecutive data // points be breaching to trigger the alarm, this value specifies that number. If - // you are setting an "M out of N" alarm, this value is the N. An alarm's total - // current evaluation period can be no longer than one day, so this number - // multiplied by Period cannot be more than 86,400 seconds. + // you are setting an "M out of N" alarm, this value is the N. + // + // An alarm's total current evaluation period can be no longer than one day, so + // this number multiplied by Period cannot be more than 86,400 seconds. // // This member is required. EvaluationPeriods *int32 @@ -96,31 +113,51 @@ type PutMetricAlarmInput struct { // The actions to execute when this alarm transitions to the ALARM state from any // other state. Each action is specified as an Amazon Resource Name (ARN). 
Valid - // values: EC2 actions: + // values: + // + // EC2 actions: + // // - arn:aws:automate:region:ec2:stop + // // - arn:aws:automate:region:ec2:terminate + // // - arn:aws:automate:region:ec2:reboot + // // - arn:aws:automate:region:ec2:recover + // // - arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Stop/1.0 + // // - // arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Terminate/1.0 + // // - arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Reboot/1.0 + // // - arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Recover/1.0 + // // Autoscaling action: + // // - // arn:aws:autoscaling:region:account-id:scalingPolicy:policy-id:autoScalingGroupName/group-friendly-name:policyName/policy-friendly-name // // Lambda actions: + // // - Invoke the latest version of a Lambda function: // arn:aws:lambda:region:account-id:function:function-name + // // - Invoke a specific version of a Lambda function: // arn:aws:lambda:region:account-id:function:function-name:version-number + // // - Invoke a function by using an alias Lambda function: // arn:aws:lambda:region:account-id:function:function-name:alias-name + // // SNS notification action: + // // - arn:aws:sns:region:account-id:sns-topic-name + // // SSM integration actions: + // // - arn:aws:ssm:region:account-id:opsitem:severity#CATEGORY=category-name + // // - arn:aws:ssm-incidents::account-id:responseplan/response-plan-name AlarmActions []string @@ -129,91 +166,134 @@ type PutMetricAlarmInput struct { // The number of data points that must be breaching to trigger the alarm. This is // used only if you are setting an "M out of N" alarm. In that case, this value is - // the M. For more information, see Evaluating an Alarm (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarm-evaluation) - // in the Amazon CloudWatch User Guide. + // the M. For more information, see [Evaluating an Alarm]in the Amazon CloudWatch User Guide. + // + // [Evaluating an Alarm]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarm-evaluation DatapointsToAlarm *int32 // The dimensions for the metric specified in MetricName . Dimensions []types.Dimension - // Used only for alarms based on percentiles. If you specify ignore , the alarm + // Used only for alarms based on percentiles. If you specify ignore , the alarm // state does not change during periods with too few data points to be // statistically significant. If you specify evaluate or omit this parameter, the // alarm is always evaluated and possibly changes state no matter how many data - // points are available. For more information, see Percentile-Based CloudWatch - // Alarms and Low Data Samples (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#percentiles-with-low-samples) - // . Valid Values: evaluate | ignore + // points are available. For more information, see [Percentile-Based CloudWatch Alarms and Low Data Samples]. + // + // Valid Values: evaluate | ignore + // + // [Percentile-Based CloudWatch Alarms and Low Data Samples]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#percentiles-with-low-samples EvaluateLowSampleCountPercentile *string // The extended statistic for the metric specified in MetricName . When you call // PutMetricAlarm and specify a MetricName , you must specify either Statistic or - // ExtendedStatistic but not both. 
If you specify ExtendedStatistic , the following - // are valid values: + // ExtendedStatistic but not both. + // + // If you specify ExtendedStatistic , the following are valid values: + // // - p90 + // // - tm90 + // // - tc90 + // // - ts90 + // // - wm90 + // // - IQM + // // - PR(n:m) where n and m are values of the metric + // // - TC(X%:X%) where X is between 10 and 90 inclusive. + // // - TM(X%:X%) where X is between 10 and 90 inclusive. + // // - TS(X%:X%) where X is between 10 and 90 inclusive. + // // - WM(X%:X%) where X is between 10 and 90 inclusive. - // For more information about these extended statistics, see CloudWatch statistics - // definitions (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Statistics-definitions.html) - // . + // + // For more information about these extended statistics, see [CloudWatch statistics definitions]. + // + // [CloudWatch statistics definitions]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Statistics-definitions.html ExtendedStatistic *string // The actions to execute when this alarm transitions to the INSUFFICIENT_DATA // state from any other state. Each action is specified as an Amazon Resource Name - // (ARN). Valid values: EC2 actions: + // (ARN). Valid values: + // + // EC2 actions: + // // - arn:aws:automate:region:ec2:stop + // // - arn:aws:automate:region:ec2:terminate + // // - arn:aws:automate:region:ec2:reboot + // // - arn:aws:automate:region:ec2:recover + // // - arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Stop/1.0 + // // - // arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Terminate/1.0 + // // - arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Reboot/1.0 + // // - arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Recover/1.0 + // // Autoscaling action: + // // - // arn:aws:autoscaling:region:account-id:scalingPolicy:policy-id:autoScalingGroupName/group-friendly-name:policyName/policy-friendly-name // // Lambda actions: + // // - Invoke the latest version of a Lambda function: // arn:aws:lambda:region:account-id:function:function-name + // // - Invoke a specific version of a Lambda function: // arn:aws:lambda:region:account-id:function:function-name:version-number + // // - Invoke a function by using an alias Lambda function: // arn:aws:lambda:region:account-id:function:function-name:alias-name + // // SNS notification action: + // // - arn:aws:sns:region:account-id:sns-topic-name + // // SSM integration actions: + // // - arn:aws:ssm:region:account-id:opsitem:severity#CATEGORY=category-name + // // - arn:aws:ssm-incidents::account-id:responseplan/response-plan-name InsufficientDataActions []string // The name for the metric associated with the alarm. For each PutMetricAlarm - // operation, you must specify either MetricName or a Metrics array. If you are - // creating an alarm based on a math expression, you cannot specify this parameter, - // or any of the Namespace , Dimensions , Period , Unit , Statistic , or - // ExtendedStatistic parameters. Instead, you specify all this information in the - // Metrics array. + // operation, you must specify either MetricName or a Metrics array. + // + // If you are creating an alarm based on a math expression, you cannot specify + // this parameter, or any of the Namespace , Dimensions , Period , Unit , Statistic + // , or ExtendedStatistic parameters. Instead, you specify all this information in + // the Metrics array. 
MetricName *string // An array of MetricDataQuery structures that enable you to create an alarm based // on the result of a metric math expression. For each PutMetricAlarm operation, - // you must specify either MetricName or a Metrics array. Each item in the Metrics - // array either retrieves a metric or performs a math expression. One item in the - // Metrics array is the expression that the alarm watches. You designate this - // expression by setting ReturnData to true for this object in the array. For more - // information, see MetricDataQuery (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDataQuery.html) - // . If you use the Metrics parameter, you cannot include the Namespace , - // MetricName , Dimensions , Period , Unit , Statistic , or ExtendedStatistic - // parameters of PutMetricAlarm in the same operation. Instead, you retrieve the - // metrics you are using in your math expression as part of the Metrics array. + // you must specify either MetricName or a Metrics array. + // + // Each item in the Metrics array either retrieves a metric or performs a math + // expression. + // + // One item in the Metrics array is the expression that the alarm watches. You + // designate this expression by setting ReturnData to true for this object in the + // array. For more information, see [MetricDataQuery]. + // + // If you use the Metrics parameter, you cannot include the Namespace , MetricName + // , Dimensions , Period , Unit , Statistic , or ExtendedStatistic parameters of + // PutMetricAlarm in the same operation. Instead, you retrieve the metrics you are + // using in your math expression as part of the Metrics array. + // + // [MetricDataQuery]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDataQuery.html Metrics []types.MetricDataQuery // The namespace for the metric associated specified in MetricName . @@ -221,49 +301,74 @@ type PutMetricAlarmInput struct { // The actions to execute when this alarm transitions to an OK state from any // other state. Each action is specified as an Amazon Resource Name (ARN). 
Valid - // values: EC2 actions: + // values: + // + // EC2 actions: + // // - arn:aws:automate:region:ec2:stop + // // - arn:aws:automate:region:ec2:terminate + // // - arn:aws:automate:region:ec2:reboot + // // - arn:aws:automate:region:ec2:recover + // // - arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Stop/1.0 + // // - // arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Terminate/1.0 + // // - arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Reboot/1.0 + // // - arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Recover/1.0 + // // Autoscaling action: + // // - // arn:aws:autoscaling:region:account-id:scalingPolicy:policy-id:autoScalingGroupName/group-friendly-name:policyName/policy-friendly-name // // Lambda actions: + // // - Invoke the latest version of a Lambda function: // arn:aws:lambda:region:account-id:function:function-name + // // - Invoke a specific version of a Lambda function: // arn:aws:lambda:region:account-id:function:function-name:version-number + // // - Invoke a function by using an alias Lambda function: // arn:aws:lambda:region:account-id:function:function-name:alias-name + // // SNS notification action: + // // - arn:aws:sns:region:account-id:sns-topic-name + // // SSM integration actions: + // // - arn:aws:ssm:region:account-id:opsitem:severity#CATEGORY=category-name + // // - arn:aws:ssm-incidents::account-id:responseplan/response-plan-name OKActions []string // The length, in seconds, used each time the metric specified in MetricName is - // evaluated. Valid values are 10, 30, and any multiple of 60. Period is required - // for alarms based on static thresholds. If you are creating an alarm based on a - // metric math expression, you specify the period for each metric within the - // objects in the Metrics array. Be sure to specify 10 or 30 only for metrics that - // are stored by a PutMetricData call with a StorageResolution of 1. If you - // specify a period of 10 or 30 for a metric that does not have sub-minute - // resolution, the alarm still attempts to gather data at the period rate that you - // specify. In this case, it does not receive data for the attempts that do not - // correspond to a one-minute data resolution, and the alarm might often lapse into - // INSUFFICENT_DATA status. Specifying 10 or 30 also sets this alarm as a - // high-resolution alarm, which has a higher charge than other alarms. For more - // information about pricing, see Amazon CloudWatch Pricing (https://aws.amazon.com/cloudwatch/pricing/) - // . An alarm's total current evaluation period can be no longer than one day, so + // evaluated. Valid values are 10, 30, and any multiple of 60. + // + // Period is required for alarms based on static thresholds. If you are creating + // an alarm based on a metric math expression, you specify the period for each + // metric within the objects in the Metrics array. + // + // Be sure to specify 10 or 30 only for metrics that are stored by a PutMetricData + // call with a StorageResolution of 1. If you specify a period of 10 or 30 for a + // metric that does not have sub-minute resolution, the alarm still attempts to + // gather data at the period rate that you specify. In this case, it does not + // receive data for the attempts that do not correspond to a one-minute data + // resolution, and the alarm might often lapse into INSUFFICENT_DATA status. + // Specifying 10 or 30 also sets this alarm as a high-resolution alarm, which has a + // higher charge than other alarms. 
For more information about pricing, see [Amazon CloudWatch Pricing]. + // + // An alarm's total current evaluation period can be no longer than one day, so // Period multiplied by EvaluationPeriods cannot be more than 86,400 seconds. + // + // [Amazon CloudWatch Pricing]: https://aws.amazon.com/cloudwatch/pricing/ Period *int32 // The statistic for the metric specified in MetricName , other than percentile. @@ -274,35 +379,46 @@ type PutMetricAlarmInput struct { // A list of key-value pairs to associate with the alarm. You can associate as // many as 50 tags with an alarm. To be able to associate tags with the alarm when - // you create the alarm, you must have the cloudwatch:TagResource permission. Tags - // can help you organize and categorize your resources. You can also use them to - // scope user permissions by granting a user permission to access or change only - // resources with certain tag values. If you are using this operation to update an - // existing alarm, any tags you specify in this parameter are ignored. To change - // the tags of an existing alarm, use TagResource (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_TagResource.html) - // or UntagResource (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_UntagResource.html) - // . + // you create the alarm, you must have the cloudwatch:TagResource permission. + // + // Tags can help you organize and categorize your resources. You can also use them + // to scope user permissions by granting a user permission to access or change only + // resources with certain tag values. + // + // If you are using this operation to update an existing alarm, any tags you + // specify in this parameter are ignored. To change the tags of an existing alarm, + // use [TagResource]or [UntagResource]. + // + // [TagResource]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_TagResource.html + // [UntagResource]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_UntagResource.html Tags []types.Tag - // The value against which the specified statistic is compared. This parameter is - // required for alarms based on static thresholds, but should not be used for - // alarms based on anomaly detection models. + // The value against which the specified statistic is compared. + // + // This parameter is required for alarms based on static thresholds, but should + // not be used for alarms based on anomaly detection models. Threshold *float64 // If this is an alarm based on an anomaly detection model, make this value match - // the ID of the ANOMALY_DETECTION_BAND function. For an example of how to use - // this parameter, see the Anomaly Detection Model Alarm example on this page. If - // your alarm uses this parameter, it cannot have Auto Scaling actions. + // the ID of the ANOMALY_DETECTION_BAND function. + // + // For an example of how to use this parameter, see the Anomaly Detection Model + // Alarm example on this page. + // + // If your alarm uses this parameter, it cannot have Auto Scaling actions. ThresholdMetricId *string - // Sets how this alarm is to handle missing data points. If TreatMissingData is - // omitted, the default behavior of missing is used. For more information, see - // Configuring How CloudWatch Alarms Treats Missing Data (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data) - // . 
Valid Values: breaching | notBreaching | ignore | missing Alarms that - // evaluate metrics in the AWS/DynamoDB namespace always ignore missing data even - // if you choose a different option for TreatMissingData . When an AWS/DynamoDB - // metric has missing data, alarms that evaluate that metric remain in their - // current state. + // Sets how this alarm is to handle missing data points. If TreatMissingData is + // omitted, the default behavior of missing is used. For more information, see [Configuring How CloudWatch Alarms Treats Missing Data]. + // + // Valid Values: breaching | notBreaching | ignore | missing + // + // Alarms that evaluate metrics in the AWS/DynamoDB namespace always ignore + // missing data even if you choose a different option for TreatMissingData . When + // an AWS/DynamoDB metric has missing data, alarms that evaluate that metric + // remain in their current state. + // + // [Configuring How CloudWatch Alarms Treats Missing Data]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data TreatMissingData *string // The unit of measure for the statistic. For example, the units for the Amazon @@ -312,14 +428,19 @@ type PutMetricAlarmInput struct { // Metric data points that specify a unit of measure, such as Percent, are // aggregated separately. If you are creating an alarm based on a metric math // expression, you can specify the unit for each metric (if needed) within the - // objects in the Metrics array. If you don't specify Unit , CloudWatch retrieves - // all unit types that have been published for the metric and attempts to evaluate - // the alarm. Usually, metrics are published with only one unit, so the alarm works - // as intended. However, if the metric is published with multiple types of units - // and you don't specify a unit, the alarm's behavior is not defined and it behaves - // unpredictably. We recommend omitting Unit so that you don't inadvertently - // specify an incorrect unit that is not published for this metric. Doing so causes - // the alarm to be stuck in the INSUFFICIENT DATA state. + // objects in the Metrics array. + // + // If you don't specify Unit , CloudWatch retrieves all unit types that have been + // published for the metric and attempts to evaluate the alarm. Usually, metrics + // are published with only one unit, so the alarm works as intended. + // + // However, if the metric is published with multiple types of units and you don't + // specify a unit, the alarm's behavior is not defined and it behaves + // unpredictably. + // + // We recommend omitting Unit so that you don't inadvertently specify an incorrect + // unit that is not published for this metric. Doing so causes the alarm to be + // stuck in the INSUFFICIENT DATA state. 
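For the PutMetricAlarm parameters documented above, a static-threshold alarm sketch (placeholder alarm name, instance ID, and SNS topic ARN; client as in the first sketch) might look like this; anomaly-detection and metric-math alarms would use ThresholdMetricId or Metrics instead:

func putCPUAlarm(ctx context.Context, cw *cloudwatch.Client) error {
    // Alarm when average CPU utilization stays above 80% for three
    // consecutive five-minute periods.
    _, err := cw.PutMetricAlarm(ctx, &cloudwatch.PutMetricAlarmInput{
        AlarmName:  aws.String("CPUUtilizationTooHigh"), // placeholder alarm name
        Namespace:  aws.String("AWS/EC2"),
        MetricName: aws.String("CPUUtilization"),
        Dimensions: []types.Dimension{
            {Name: aws.String("InstanceId"), Value: aws.String("i-0123456789abcdef0")}, // placeholder instance
        },
        Statistic:          types.StatisticAverage,
        Period:             aws.Int32(300),
        EvaluationPeriods:  aws.Int32(3),
        Threshold:          aws.Float64(80),
        ComparisonOperator: types.ComparisonOperatorGreaterThanThreshold,
        TreatMissingData:   aws.String("missing"),
        AlarmActions:       []string{"arn:aws:sns:us-east-1:111122223333:alerts"}, // placeholder ARN
    })
    return err
}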
Unit types.StandardUnit noSmithyDocumentSerde @@ -387,6 +508,12 @@ func (c *Client) addOperationPutMetricAlarmMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpPutMetricAlarmValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_PutMetricData.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_PutMetricData.go index e8fe999300d..55d836f34cc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_PutMetricData.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_PutMetricData.go @@ -15,37 +15,52 @@ import ( // Publishes metric data points to Amazon CloudWatch. CloudWatch associates the // data points with the specified metric. If the specified metric does not exist, // CloudWatch creates the metric. When CloudWatch creates a metric, it can take up -// to fifteen minutes for the metric to appear in calls to ListMetrics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_ListMetrics.html) -// . You can publish either individual data points in the Value field, or arrays -// of values and the number of times each value occurred during the period by using +// to fifteen minutes for the metric to appear in calls to [ListMetrics]. +// +// You can publish either individual data points in the Value field, or arrays of +// values and the number of times each value occurred during the period by using // the Values and Counts fields in the MetricData structure. Using the Values and // Counts method enables you to publish up to 150 values per metric with one // PutMetricData request, and supports retrieving percentile statistics on this -// data. Each PutMetricData request is limited to 1 MB in size for HTTP POST -// requests. You can send a payload compressed by gzip. Each request is also -// limited to no more than 1000 different metrics. Although the Value parameter -// accepts numbers of type Double , CloudWatch rejects values that are either too -// small or too large. Values must be in the range of -2^360 to 2^360. In addition, -// special values (for example, NaN, +Infinity, -Infinity) are not supported. You -// can use up to 30 dimensions per metric to further clarify what data the metric -// collects. Each dimension consists of a Name and Value pair. For more information -// about specifying dimensions, see Publishing Metrics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html) -// in the Amazon CloudWatch User Guide. You specify the time stamp to be associated -// with each data point. You can specify time stamps that are as much as two weeks -// before the current date, and as much as 2 hours after the current day and time. +// data. +// +// Each PutMetricData request is limited to 1 MB in size for HTTP POST requests. +// You can send a payload compressed by gzip. Each request is also limited to no +// more than 1000 different metrics. +// +// Although the Value parameter accepts numbers of type Double , CloudWatch rejects +// values that are either too small or too large. Values must be in the range of +// -2^360 to 2^360. In addition, special values (for example, NaN, +Infinity, +// -Infinity) are not supported. 
+// +// You can use up to 30 dimensions per metric to further clarify what data the +// metric collects. Each dimension consists of a Name and Value pair. For more +// information about specifying dimensions, see [Publishing Metrics]in the Amazon CloudWatch User +// Guide. +// +// You specify the time stamp to be associated with each data point. You can +// specify time stamps that are as much as two weeks before the current date, and +// as much as 2 hours after the current day and time. +// // Data points with time stamps from 24 hours ago or longer can take at least 48 -// hours to become available for GetMetricData (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricData.html) -// or GetMetricStatistics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricStatistics.html) -// from the time they are submitted. Data points with time stamps between 3 and 24 -// hours ago can take as much as 2 hours to become available for for GetMetricData (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricData.html) -// or GetMetricStatistics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricStatistics.html) -// . CloudWatch needs raw data points to calculate percentile statistics. If you +// hours to become available for [GetMetricData]or [GetMetricStatistics] from the time they are submitted. Data points +// with time stamps between 3 and 24 hours ago can take as much as 2 hours to +// become available for for [GetMetricData]or [GetMetricStatistics]. +// +// CloudWatch needs raw data points to calculate percentile statistics. If you // publish data using a statistic set instead, you can only retrieve percentile // statistics for this data if one of the following conditions is true: +// // - The SampleCount value of the statistic set is 1 and Min , Max , and Sum are // all equal. +// // - The Min and Max are equal, and Sum is equal to Min multiplied by SampleCount // . +// +// [GetMetricData]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricData.html +// [GetMetricStatistics]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricStatistics.html +// [ListMetrics]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_ListMetrics.html +// [Publishing Metrics]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html func (c *Client) PutMetricData(ctx context.Context, params *PutMetricDataInput, optFns ...func(*Options)) (*PutMetricDataOutput, error) { if params == nil { params = &PutMetricDataInput{} @@ -70,9 +85,10 @@ type PutMetricDataInput struct { MetricData []types.MetricDatum // The namespace for the metric data. You can use ASCII characters for the - // namespace, except for control characters which are not supported. To avoid - // conflicts with Amazon Web Services service namespaces, you should not specify a - // namespace that begins with AWS/ + // namespace, except for control characters which are not supported. + // + // To avoid conflicts with Amazon Web Services service namespaces, you should not + // specify a namespace that begins with AWS/ // // This member is required. 
Namespace *string @@ -142,6 +158,15 @@ func (c *Client) addOperationPutMetricDataMiddlewares(stack *middleware.Stack, o if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsRequestCompressionUserAgent(stack, options); err != nil { + return err + } if err = addOperationPutMetricDataRequestCompressionMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_PutMetricStream.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_PutMetricStream.go index d1b6264420c..f1d4a4f0f71 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_PutMetricStream.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_PutMetricStream.go @@ -13,14 +13,21 @@ import ( // Creates or updates a metric stream. Metric streams can automatically stream // CloudWatch metrics to Amazon Web Services destinations, including Amazon S3, and -// to many third-party solutions. For more information, see Using Metric Streams (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Metric-Streams.html) -// . To create a metric stream, you must be signed in to an account that has the +// to many third-party solutions. +// +// For more information, see [Using Metric Streams]. +// +// To create a metric stream, you must be signed in to an account that has the // iam:PassRole permission and either the CloudWatchFullAccess policy or the -// cloudwatch:PutMetricStream permission. When you create or update a metric -// stream, you choose one of the following: +// cloudwatch:PutMetricStream permission. +// +// When you create or update a metric stream, you choose one of the following: +// // - Stream metrics from all metric namespaces in the account. +// // - Stream metrics from all metric namespaces in the account, except for the // namespaces that you list in ExcludeFilters . +// // - Stream metrics from only the metric namespaces that you list in // IncludeFilters . // @@ -28,14 +35,19 @@ import ( // statistics for each metric that is streamed. You can use the // StatisticsConfigurations parameter to have the metric stream send additional // statistics in the stream. Streaming additional statistics incurs additional -// costs. For more information, see Amazon CloudWatch Pricing (https://aws.amazon.com/cloudwatch/pricing/) -// . When you use PutMetricStream to create a new metric stream, the stream is +// costs. For more information, see [Amazon CloudWatch Pricing]. +// +// When you use PutMetricStream to create a new metric stream, the stream is // created in the running state. If you use it to update an existing stream, the -// state of the stream is not changed. If you are using CloudWatch cross-account -// observability and you create a metric stream in a monitoring account, you can -// choose whether to include metrics from source accounts in the stream. For more -// information, see CloudWatch cross-account observability (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html) -// . +// state of the stream is not changed. +// +// If you are using CloudWatch cross-account observability and you create a metric +// stream in a monitoring account, you can choose whether to include metrics from +// source accounts in the stream. 
For more information, see [CloudWatch cross-account observability]. +// +// [Using Metric Streams]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Metric-Streams.html +// [CloudWatch cross-account observability]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html +// [Amazon CloudWatch Pricing]: https://aws.amazon.com/cloudwatch/pricing/ func (c *Client) PutMetricStream(ctx context.Context, params *PutMetricStreamInput, optFns ...func(*Options)) (*PutMetricStreamOutput, error) { if params == nil { params = &PutMetricStreamInput{} @@ -62,17 +74,21 @@ type PutMetricStreamInput struct { // If you are creating a new metric stream, this is the name for the new stream. // The name must be different than the names of other metric streams in this - // account and Region. If you are updating a metric stream, specify the name of - // that stream here. Valid characters are A-Z, a-z, 0-9, "-" and "_". + // account and Region. + // + // If you are updating a metric stream, specify the name of that stream here. + // + // Valid characters are A-Z, a-z, 0-9, "-" and "_". // // This member is required. Name *string // The output format for the stream. Valid values are json , opentelemetry1.0 , and - // opentelemetry0.7 . For more information about metric stream output formats, see - // Metric streams output formats (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-metric-streams-formats.html) + // opentelemetry0.7 . For more information about metric stream output formats, see [Metric streams output formats] // . // + // [Metric streams output formats]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-metric-streams-formats.html + // // This member is required. OutputFormat types.MetricStreamOutputFormat @@ -80,20 +96,24 @@ type PutMetricStreamInput struct { // Kinesis Data Firehose resources. This IAM role must already exist and must be in // the same account as the metric stream. This IAM role must include the following // permissions: + // // - firehose:PutRecord + // // - firehose:PutRecordBatch // // This member is required. RoleArn *string // If you specify this parameter, the stream sends metrics from all metric - // namespaces except for the namespaces that you specify here. You cannot include - // ExcludeFilters and IncludeFilters in the same operation. + // namespaces except for the namespaces that you specify here. + // + // You cannot include ExcludeFilters and IncludeFilters in the same operation. ExcludeFilters []types.MetricStreamFilter // If you specify this parameter, the stream sends only the metrics from the - // metric namespaces that you specify here. You cannot include IncludeFilters and - // ExcludeFilters in the same operation. + // metric namespaces that you specify here. + // + // You cannot include IncludeFilters and ExcludeFilters in the same operation. IncludeFilters []types.MetricStreamFilter // If you are creating a metric stream in a monitoring account, specify true to @@ -103,25 +123,33 @@ type PutMetricStreamInput struct { // By default, a metric stream always sends the MAX , MIN , SUM , and SAMPLECOUNT // statistics for each metric that is streamed. You can use this parameter to have // the metric stream also send additional statistics in the stream. This array can - // have up to 100 members. For each entry in this array, you specify one or more - // metrics and the list of additional statistics to stream for those metrics. 
The - // additional statistics that you can stream depend on the stream's OutputFormat . - // If the OutputFormat is json , you can stream any additional statistic that is - // supported by CloudWatch, listed in CloudWatch statistics definitions (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Statistics-definitions.html.html) - // . If the OutputFormat is opentelemetry1.0 or opentelemetry0.7 , you can stream - // percentile statistics such as p95, p99.9, and so on. + // have up to 100 members. + // + // For each entry in this array, you specify one or more metrics and the list of + // additional statistics to stream for those metrics. The additional statistics + // that you can stream depend on the stream's OutputFormat . If the OutputFormat + // is json , you can stream any additional statistic that is supported by + // CloudWatch, listed in [CloudWatch statistics definitions]. If the OutputFormat is opentelemetry1.0 or + // opentelemetry0.7 , you can stream percentile statistics such as p95, p99.9, and + // so on. + // + // [CloudWatch statistics definitions]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Statistics-definitions.html.html StatisticsConfigurations []types.MetricStreamStatisticsConfiguration // A list of key-value pairs to associate with the metric stream. You can - // associate as many as 50 tags with a metric stream. Tags can help you organize - // and categorize your resources. You can also use them to scope user permissions - // by granting a user permission to access or change only resources with certain - // tag values. You can use this parameter only when you are creating a new metric - // stream. If you are using this operation to update an existing metric stream, any - // tags you specify in this parameter are ignored. To change the tags of an - // existing metric stream, use TagResource (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_TagResource.html) - // or UntagResource (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_UntagResource.html) - // . + // associate as many as 50 tags with a metric stream. + // + // Tags can help you organize and categorize your resources. You can also use them + // to scope user permissions by granting a user permission to access or change only + // resources with certain tag values. + // + // You can use this parameter only when you are creating a new metric stream. If + // you are using this operation to update an existing metric stream, any tags you + // specify in this parameter are ignored. To change the tags of an existing metric + // stream, use [TagResource]or [UntagResource]. 
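// Example (illustrative sketch): creating a metric stream with the
// PutMetricStreamInput fields documented above. All names and ARNs are invented;
// the FirehoseArn delivery-target field is assumed from the full input type, and
// IncludeFilters and ExcludeFilters cannot be combined in one call. Tags are
// applied only when the stream is first created.
func examplePutMetricStream(ctx context.Context, client *cloudwatch.Client) error {
	_, err := client.PutMetricStream(ctx, &cloudwatch.PutMetricStreamInput{
		Name:         aws.String("example-stream"),
		FirehoseArn:  aws.String("arn:aws:firehose:us-east-1:111122223333:deliverystream/example"),
		RoleArn:      aws.String("arn:aws:iam::111122223333:role/example-metric-stream-role"),
		OutputFormat: types.MetricStreamOutputFormatJson,
		IncludeFilters: []types.MetricStreamFilter{
			{Namespace: aws.String("AWS/EC2")},
			{Namespace: aws.String("AWS/Lambda")},
		},
		Tags: []types.Tag{{Key: aws.String("team"), Value: aws.String("platform")}},
	})
	return err
}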
+ // + // [TagResource]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_TagResource.html + // [UntagResource]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_UntagResource.html Tags []types.Tag noSmithyDocumentSerde @@ -193,6 +221,12 @@ func (c *Client) addOperationPutMetricStreamMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpPutMetricStreamValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_SetAlarmState.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_SetAlarmState.go index cf3a2942451..3048696dff0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_SetAlarmState.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_SetAlarmState.go @@ -15,16 +15,22 @@ import ( // state differs from the previous value, the action configured for the appropriate // state is invoked. For example, if your alarm is configured to send an Amazon SNS // message when an alarm is triggered, temporarily changing the alarm state to -// ALARM sends an SNS message. Metric alarms returns to their actual state quickly, -// often within seconds. Because the metric alarm state change happens quickly, it -// is typically only visible in the alarm's History tab in the Amazon CloudWatch -// console or through DescribeAlarmHistory (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_DescribeAlarmHistory.html) -// . If you use SetAlarmState on a composite alarm, the composite alarm is not +// ALARM sends an SNS message. +// +// Metric alarms returns to their actual state quickly, often within seconds. +// Because the metric alarm state change happens quickly, it is typically only +// visible in the alarm's History tab in the Amazon CloudWatch console or through [DescribeAlarmHistory]. +// +// If you use SetAlarmState on a composite alarm, the composite alarm is not // guaranteed to return to its actual state. It returns to its actual state only // once any of its children alarms change state. It is also reevaluated if you -// update its configuration. If an alarm triggers EC2 Auto Scaling policies or -// application Auto Scaling policies, you must include information in the -// StateReasonData parameter to enable the policy to take the correct action. +// update its configuration. +// +// If an alarm triggers EC2 Auto Scaling policies or application Auto Scaling +// policies, you must include information in the StateReasonData parameter to +// enable the policy to take the correct action. +// +// [DescribeAlarmHistory]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_DescribeAlarmHistory.html func (c *Client) SetAlarmState(ctx context.Context, params *SetAlarmStateInput, optFns ...func(*Options)) (*SetAlarmStateOutput, error) { if params == nil { params = &SetAlarmStateInput{} @@ -57,10 +63,11 @@ type SetAlarmStateInput struct { // This member is required. StateValue types.StateValue - // The reason that this alarm is set to this specific state, in JSON format. For - // SNS or EC2 alarm actions, this is just informational. 
But for EC2 Auto Scaling - // or application Auto Scaling alarm actions, the Auto Scaling policy uses the - // information in this field to take the correct action. + // The reason that this alarm is set to this specific state, in JSON format. + // + // For SNS or EC2 alarm actions, this is just informational. But for EC2 Auto + // Scaling or application Auto Scaling alarm actions, the Auto Scaling policy uses + // the information in this field to take the correct action. StateReasonData *string noSmithyDocumentSerde @@ -128,6 +135,12 @@ func (c *Client) addOperationSetAlarmStateMiddlewares(stack *middleware.Stack, o if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpSetAlarmStateValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_StartMetricStreams.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_StartMetricStreams.go index 176974958f7..563b87ca454 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_StartMetricStreams.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_StartMetricStreams.go @@ -28,10 +28,11 @@ func (c *Client) StartMetricStreams(ctx context.Context, params *StartMetricStre type StartMetricStreamsInput struct { - // The array of the names of metric streams to start streaming. This is an "all or - // nothing" operation. If you do not have permission to access all of the metric - // streams that you list here, then none of the streams that you list in the - // operation will start streaming. + // The array of the names of metric streams to start streaming. + // + // This is an "all or nothing" operation. If you do not have permission to access + // all of the metric streams that you list here, then none of the streams that you + // list in the operation will start streaming. // // This member is required. Names []string @@ -101,6 +102,12 @@ func (c *Client) addOperationStartMetricStreamsMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpStartMetricStreamsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_StopMetricStreams.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_StopMetricStreams.go index e886c05bf1e..3c8f148371d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_StopMetricStreams.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_StopMetricStreams.go @@ -28,10 +28,11 @@ func (c *Client) StopMetricStreams(ctx context.Context, params *StopMetricStream type StopMetricStreamsInput struct { - // The array of the names of metric streams to stop streaming. This is an "all or - // nothing" operation. If you do not have permission to access all of the metric - // streams that you list here, then none of the streams that you list in the - // operation will stop streaming. + // The array of the names of metric streams to stop streaming. + // + // This is an "all or nothing" operation. 
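// Example (illustrative sketch): using the SetAlarmState operation shown above to
// force an alarm into ALARM and verify that its notification action fires. The
// alarm name and reason are invented; StateReasonData is omitted because it is
// only needed when the alarm drives EC2 or Application Auto Scaling policies.
func exampleSetAlarmState(ctx context.Context, client *cloudwatch.Client) error {
	_, err := client.SetAlarmState(ctx, &cloudwatch.SetAlarmStateInput{
		AlarmName:   aws.String("example-high-cpu"),
		StateValue:  types.StateValueAlarm,
		StateReason: aws.String("Manually testing notification delivery"),
	})
	return err
}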
If you do not have permission to access + // all of the metric streams that you list here, then none of the streams that you + // list in the operation will stop streaming. // // This member is required. Names []string @@ -101,6 +102,12 @@ func (c *Client) addOperationStopMetricStreamsMiddlewares(stack *middleware.Stac if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpStopMetricStreamsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_TagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_TagResource.go index fc70adfc545..179b70ce43c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_TagResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_TagResource.go @@ -13,16 +13,22 @@ import ( // Assigns one or more tags (key-value pairs) to the specified CloudWatch // resource. Currently, the only CloudWatch resources that can be tagged are alarms -// and Contributor Insights rules. Tags can help you organize and categorize your -// resources. You can also use them to scope user permissions by granting a user -// permission to access or change only resources with certain tag values. Tags -// don't have any semantic meaning to Amazon Web Services and are interpreted -// strictly as strings of characters. You can use the TagResource action with an -// alarm that already has tags. If you specify a new tag key for the alarm, this -// tag is appended to the list of tags associated with the alarm. If you specify a -// tag key that is already associated with the alarm, the new tag value that you -// specify replaces the previous value for that tag. You can associate as many as -// 50 tags with a CloudWatch resource. +// and Contributor Insights rules. +// +// Tags can help you organize and categorize your resources. You can also use them +// to scope user permissions by granting a user permission to access or change only +// resources with certain tag values. +// +// Tags don't have any semantic meaning to Amazon Web Services and are interpreted +// strictly as strings of characters. +// +// You can use the TagResource action with an alarm that already has tags. If you +// specify a new tag key for the alarm, this tag is appended to the list of tags +// associated with the alarm. If you specify a tag key that is already associated +// with the alarm, the new tag value that you specify replaces the previous value +// for that tag. +// +// You can associate as many as 50 tags with a CloudWatch resource. func (c *Client) TagResource(ctx context.Context, params *TagResourceInput, optFns ...func(*Options)) (*TagResourceOutput, error) { if params == nil { params = &TagResourceInput{} @@ -40,12 +46,18 @@ func (c *Client) TagResource(ctx context.Context, params *TagResourceInput, optF type TagResourceInput struct { - // The ARN of the CloudWatch resource that you're adding tags to. 
The ARN format - // of an alarm is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name The ARN - // format of a Contributor Insights rule is - // arn:aws:cloudwatch:Region:account-id:insight-rule/insight-rule-name For more - // information about ARN format, see Resource Types Defined by Amazon CloudWatch (https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazoncloudwatch.html#amazoncloudwatch-resources-for-iam-policies) - // in the Amazon Web Services General Reference. + // The ARN of the CloudWatch resource that you're adding tags to. + // + // The ARN format of an alarm is + // arn:aws:cloudwatch:Region:account-id:alarm:alarm-name + // + // The ARN format of a Contributor Insights rule is + // arn:aws:cloudwatch:Region:account-id:insight-rule/insight-rule-name + // + // For more information about ARN format, see [Resource Types Defined by Amazon CloudWatch] in the Amazon Web Services General + // Reference. + // + // [Resource Types Defined by Amazon CloudWatch]: https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazoncloudwatch.html#amazoncloudwatch-resources-for-iam-policies // // This member is required. ResourceARN *string @@ -120,6 +132,12 @@ func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, opt if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpTagResourceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_UntagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_UntagResource.go index 5f287905b89..b30b89e955b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_UntagResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/api_op_UntagResource.go @@ -28,12 +28,18 @@ func (c *Client) UntagResource(ctx context.Context, params *UntagResourceInput, type UntagResourceInput struct { - // The ARN of the CloudWatch resource that you're removing tags from. The ARN - // format of an alarm is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name + // The ARN of the CloudWatch resource that you're removing tags from. + // + // The ARN format of an alarm is + // arn:aws:cloudwatch:Region:account-id:alarm:alarm-name + // // The ARN format of a Contributor Insights rule is - // arn:aws:cloudwatch:Region:account-id:insight-rule/insight-rule-name For more - // information about ARN format, see Resource Types Defined by Amazon CloudWatch (https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazoncloudwatch.html#amazoncloudwatch-resources-for-iam-policies) - // in the Amazon Web Services General Reference. + // arn:aws:cloudwatch:Region:account-id:insight-rule/insight-rule-name + // + // For more information about ARN format, see [Resource Types Defined by Amazon CloudWatch] in the Amazon Web Services General + // Reference. + // + // [Resource Types Defined by Amazon CloudWatch]: https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazoncloudwatch.html#amazoncloudwatch-resources-for-iam-policies // // This member is required. 
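// Example (illustrative sketch): tagging an existing alarm with the TagResource
// operation, using the alarm ARN format documented above. The Region, account ID,
// alarm name, and tag values are placeholders.
func exampleTagAlarm(ctx context.Context, client *cloudwatch.Client) error {
	_, err := client.TagResource(ctx, &cloudwatch.TagResourceInput{
		ResourceARN: aws.String("arn:aws:cloudwatch:us-east-1:111122223333:alarm:example-high-cpu"),
		Tags: []types.Tag{
			{Key: aws.String("team"), Value: aws.String("platform")},
			{Key: aws.String("env"), Value: aws.String("prod")},
		},
	})
	return err
}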
ResourceARN *string @@ -108,6 +114,12 @@ func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, o if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpUntagResourceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/auth.go index d7dd29021ef..15b28e8e249 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/auth.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/auth.go @@ -12,7 +12,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -func bindAuthParamsRegion(params *AuthResolverParameters, _ interface{}, options Options) { +func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { params.Region = options.Region } @@ -90,12 +90,12 @@ type AuthResolverParameters struct { Region string } -func bindAuthResolverParams(operation string, input interface{}, options Options) *AuthResolverParameters { +func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { params := &AuthResolverParameters{ Operation: operation, } - bindAuthParamsRegion(params, input, options) + bindAuthParamsRegion(ctx, params, input, options) return params } @@ -145,7 +145,7 @@ func (*resolveAuthSchemeMiddleware) ID() string { func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - params := bindAuthResolverParams(m.operation, getOperationInput(ctx), m.options) + params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) if err != nil { return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/deserializers.go index 0eccc85ab41..e5bd7c08fed 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/deserializers.go @@ -25,6 +25,14 @@ import ( "time" ) +func deserializeS3Expires(v string) (*time.Time, error) { + t, err := smithytime.ParseHTTPDate(v) + if err != nil { + return nil, nil + } + return &t, nil +} + type awsAwsquery_deserializeOpDeleteAlarms struct { } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/doc.go index 7c7a039e6f2..f65ff3db874 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/doc.go @@ -6,14 +6,17 @@ // Amazon CloudWatch monitors your Amazon Web Services (Amazon Web Services) // resources and the applications you run on Amazon Web Services in real time. You // can use CloudWatch to collect and track metrics, which are the variables you -// want to measure for your resources and applications. CloudWatch alarms send -// notifications or automatically change the resources you are monitoring based on -// rules that you define. 
For example, you can monitor the CPU usage and disk reads -// and writes of your Amazon EC2 instances. Then, use this data to determine -// whether you should launch additional instances to handle increased load. You can -// also use this data to stop under-used instances to save money. In addition to -// monitoring the built-in metrics that come with Amazon Web Services, you can -// monitor your own custom metrics. With CloudWatch, you gain system-wide -// visibility into resource utilization, application performance, and operational -// health. +// want to measure for your resources and applications. +// +// CloudWatch alarms send notifications or automatically change the resources you +// are monitoring based on rules that you define. For example, you can monitor the +// CPU usage and disk reads and writes of your Amazon EC2 instances. Then, use this +// data to determine whether you should launch additional instances to handle +// increased load. You can also use this data to stop under-used instances to save +// money. +// +// In addition to monitoring the built-in metrics that come with Amazon Web +// Services, you can monitor your own custom metrics. With CloudWatch, you gain +// system-wide visibility into resource utilization, application performance, and +// operational health. package cloudwatch diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/endpoints.go index 1e1b8d079c6..bef96efb1fa 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/endpoints.go @@ -288,6 +288,17 @@ func (p EndpointParameters) WithDefaults() EndpointParameters { return p } +type stringSlice []string + +func (s stringSlice) Get(i int) *string { + if i < 0 || i >= len(s) { + return nil + } + + v := s[i] + return &v +} + // EndpointResolverV2 provides the interface for resolving service endpoints. 
type EndpointResolverV2 interface { // ResolveEndpoint attempts to resolve the endpoint with the provided options, @@ -465,7 +476,7 @@ type endpointParamsBinder interface { bindEndpointParams(*EndpointParameters) } -func bindEndpointParams(input interface{}, options Options) *EndpointParameters { +func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { params := &EndpointParameters{} params.Region = bindRegion(options.Region) @@ -495,6 +506,10 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return next.HandleFinalize(ctx, in) } + if err := checkAccountID(getIdentity(ctx), m.options.AccountIDEndpointMode); err != nil { + return out, metadata, fmt.Errorf("invalid accountID set: %w", err) + } + req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) @@ -504,7 +519,7 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") } - params := bindEndpointParams(getOperationInput(ctx), m.options) + params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/go_module_metadata.go index d13f939a796..9709548807e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/go_module_metadata.go @@ -3,4 +3,4 @@ package cloudwatch // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.38.0" +const goModuleVersion = "1.40.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/options.go index d6114e6c17c..3b099a2f8ce 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/options.go @@ -24,6 +24,9 @@ type Options struct { // modify this list for per operation behavior. APIOptions []func(*middleware.Stack) error + // Indicates how aws account ID is applied in endpoint2.0 routing + AccountIDEndpointMode aws.AccountIDEndpointMode + // The optional application specific identifier appended to the User-Agent header. AppID string @@ -53,8 +56,10 @@ type Options struct { // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a // value for this field will likely prevent you from using any endpoint-related // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom - // endpoint, set the client option BaseEndpoint instead. + // BaseEndpoint. + // + // To migrate an EndpointResolver implementation that uses a custom endpoint, set + // the client option BaseEndpoint instead. EndpointResolver EndpointResolver // Resolves the endpoint used for a particular service operation. This should be @@ -77,17 +82,20 @@ type Options struct { // RetryMaxAttempts specifies the maximum number attempts an API client will call // an operation that fails with a retryable error. 
A value of 0 is ignored, and // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. If specified in an operation call's - // functional options with a value that is different than the constructed client's - // Options, the Client's Retryer will be wrapped to use the operation's specific - // RetryMaxAttempts value. + // per operation call's retry max attempts. + // + // If specified in an operation call's functional options with a value that is + // different than the constructed client's Options, the Client's Retryer will be + // wrapped to use the operation's specific RetryMaxAttempts value. RetryMaxAttempts int // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. Currently does not support per operation call - // overrides, may in the future. + // Retryer option is not also specified. + // + // When creating a new API Clients this member will only be used if the Retryer + // Options member is nil. This value will be ignored if Retryer is not nil. + // + // Currently does not support per operation call overrides, may in the future. RetryMode aws.RetryMode // Retryer guides how HTTP requests should be retried in case of recoverable @@ -104,8 +112,9 @@ type Options struct { // The initial DefaultsMode used when the client options were constructed. If the // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. Currently does not support per operation call - // overrides, may in the future. + // value was at that point in time. + // + // Currently does not support per operation call overrides, may in the future. resolvedDefaultsMode aws.DefaultsMode // The HTTP client to invoke API calls with. Defaults to client's default HTTP @@ -150,6 +159,7 @@ func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { // Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for // this field will likely prevent you from using any endpoint-related service // features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// // To migrate an EndpointResolver implementation that uses a custom endpoint, set // the client option BaseEndpoint instead. func WithEndpointResolver(v EndpointResolver) func(*Options) { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/types/enums.go index 1b62b8b9218..78906bbb8a5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/types/enums.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/types/enums.go @@ -12,8 +12,9 @@ const ( ) // Values returns all known values for ActionsSuppressedBy. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ActionsSuppressedBy) Values() []ActionsSuppressedBy { return []ActionsSuppressedBy{ "WaitPeriod", @@ -31,8 +32,9 @@ const ( ) // Values returns all known values for AlarmType. 
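// Example (illustrative sketch): constructing a client with the retry options
// described above and a BaseEndpoint override, the documented replacement for the
// deprecated EndpointResolver option. The endpoint URL is a placeholder and the
// config package import (github.com/aws/aws-sdk-go-v2/config) is assumed.
func exampleNewClient(ctx context.Context) (*cloudwatch.Client, error) {
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		return nil, err
	}
	client := cloudwatch.NewFromConfig(cfg, func(o *cloudwatch.Options) {
		o.RetryMaxAttempts = 5              // per-client cap on attempts for retryable errors
		o.RetryMode = aws.RetryModeAdaptive // only used when no explicit Retryer is set
		o.BaseEndpoint = aws.String("https://monitoring.us-east-1.amazonaws.com")
	})
	return client, nil
}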
Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (AlarmType) Values() []AlarmType { return []AlarmType{ "CompositeAlarm", @@ -51,6 +53,7 @@ const ( // Values returns all known values for AnomalyDetectorStateValue. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (AnomalyDetectorStateValue) Values() []AnomalyDetectorStateValue { return []AnomalyDetectorStateValue{ @@ -69,8 +72,9 @@ const ( ) // Values returns all known values for AnomalyDetectorType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (AnomalyDetectorType) Values() []AnomalyDetectorType { return []AnomalyDetectorType{ "SINGLE_METRIC", @@ -92,8 +96,9 @@ const ( ) // Values returns all known values for ComparisonOperator. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ComparisonOperator) Values() []ComparisonOperator { return []ComparisonOperator{ "GreaterThanOrEqualToThreshold", @@ -114,8 +119,9 @@ const ( ) // Values returns all known values for EvaluationState. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (EvaluationState) Values() []EvaluationState { return []EvaluationState{ "PARTIAL_DATA", @@ -132,8 +138,9 @@ const ( ) // Values returns all known values for HistoryItemType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (HistoryItemType) Values() []HistoryItemType { return []HistoryItemType{ "ConfigurationUpdate", @@ -153,6 +160,7 @@ const ( // Values returns all known values for MetricStreamOutputFormat. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (MetricStreamOutputFormat) Values() []MetricStreamOutputFormat { return []MetricStreamOutputFormat{ @@ -170,8 +178,9 @@ const ( ) // Values returns all known values for RecentlyActive. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. 
+// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (RecentlyActive) Values() []RecentlyActive { return []RecentlyActive{ "PT3H", @@ -187,8 +196,9 @@ const ( ) // Values returns all known values for ScanBy. Note that this can be expanded in -// the future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. +// the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ScanBy) Values() []ScanBy { return []ScanBy{ "TimestampDescending", @@ -230,8 +240,9 @@ const ( ) // Values returns all known values for StandardUnit. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (StandardUnit) Values() []StandardUnit { return []StandardUnit{ "Seconds", @@ -274,8 +285,9 @@ const ( ) // Values returns all known values for StateValue. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (StateValue) Values() []StateValue { return []StateValue{ "OK", @@ -296,8 +308,9 @@ const ( ) // Values returns all known values for Statistic. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (Statistic) Values() []Statistic { return []Statistic{ "SampleCount", @@ -319,8 +332,9 @@ const ( ) // Values returns all known values for StatusCode. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (StatusCode) Values() []StatusCode { return []StatusCode{ "Complete", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/types/types.go index b33d90b5891..e5d7e8eff46 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/cloudwatch/types/types.go @@ -33,9 +33,10 @@ type AlarmHistoryItem struct { // An anomaly detection model associated with a particular CloudWatch metric, // statistic, or metric math expression. You can use the model to display a band of -// expected, normal values when the metric is graphed. If you have enabled unified -// cross-account observability, and this account is a monitoring account, the -// metric can be in the same account or a source account. +// expected, normal values when the metric is graphed. 
+// +// If you have enabled unified cross-account observability, and this account is a +// monitoring account, the metric can be in the same account or a source account. type AnomalyDetector struct { // The configuration specifies details about how the anomaly detection model is to @@ -92,9 +93,12 @@ type AnomalyDetectorConfiguration struct { // The time zone to use for the metric. This is useful to enable the model to // automatically account for daylight savings time changes if the metric is - // sensitive to such time changes. To specify a time zone, use the name of the time - // zone as specified in the standard tz database. For more information, see tz - // database (https://en.wikipedia.org/wiki/Tz_database) . + // sensitive to such time changes. + // + // To specify a time zone, use the name of the time zone as specified in the + // standard tz database. For more information, see [tz database]. + // + // [tz database]: https://en.wikipedia.org/wiki/Tz_database MetricTimezone *string noSmithyDocumentSerde @@ -107,7 +111,7 @@ type CompositeAlarm struct { // state. ActionsEnabled *bool - // When the value is ALARM , it means that the actions are suppressed because the + // When the value is ALARM , it means that the actions are suppressed because the // suppressor alarm is in ALARM When the value is WaitPeriod , it means that the // actions are suppressed because the composite alarm is waiting for the suppressor // alarm to go into into the ALARM state. The maximum waiting time is as specified @@ -119,23 +123,26 @@ type CompositeAlarm struct { // its actions. ActionsSuppressedBy ActionsSuppressedBy - // Captures the reason for action suppression. + // Captures the reason for action suppression. ActionsSuppressedReason *string - // Actions will be suppressed if the suppressor alarm is in the ALARM state. + // Actions will be suppressed if the suppressor alarm is in the ALARM state. // ActionsSuppressor can be an AlarmName or an Amazon Resource Name (ARN) from an // existing alarm. ActionsSuppressor *string - // The maximum time in seconds that the composite alarm waits after suppressor + // The maximum time in seconds that the composite alarm waits after suppressor // alarm goes out of the ALARM state. After this time, the composite alarm - // performs its actions. ExtensionPeriod is required only when ActionsSuppressor - // is specified. + // performs its actions. + // + // ExtensionPeriod is required only when ActionsSuppressor is specified. ActionsSuppressorExtensionPeriod *int32 - // The maximum time in seconds that the composite alarm waits for the suppressor + // The maximum time in seconds that the composite alarm waits for the suppressor // alarm to go into the ALARM state. After this time, the composite alarm performs - // its actions. WaitPeriod is required only when ActionsSuppressor is specified. + // its actions. + // + // WaitPeriod is required only when ActionsSuppressor is specified. ActionsSuppressorWaitPeriod *int32 // The actions to execute when this alarm transitions to the ALARM state from any @@ -172,7 +179,7 @@ type CompositeAlarm struct { // An explanation for the alarm state, in JSON format. StateReasonData *string - // The timestamp of the last change to the alarm's StateValue . + // The timestamp of the last change to the alarm's StateValue . StateTransitionedTimestamp *time.Time // Tracks the timestamp of any state update, even if StateValue doesn't change. 
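// Example (illustrative sketch): the ActionsSuppressor fields described above are
// set when creating a composite alarm with PutCompositeAlarm (an operation not
// shown in this excerpt). Alarm names, the rule expression, the SNS topic ARN, and
// the wait/extension periods are invented.
func examplePutCompositeAlarm(ctx context.Context, client *cloudwatch.Client) error {
	_, err := client.PutCompositeAlarm(ctx, &cloudwatch.PutCompositeAlarmInput{
		AlarmName:    aws.String("example-service-degraded"),
		AlarmRule:    aws.String(`ALARM("example-high-cpu") OR ALARM("example-high-errors")`),
		AlarmActions: []string{"arn:aws:sns:us-east-1:111122223333:example-topic"},
		// Suppress actions while the maintenance-window alarm is in ALARM.
		ActionsSuppressor:                aws.String("example-maintenance-window"),
		ActionsSuppressorWaitPeriod:      aws.Int32(60),
		ActionsSuppressorExtensionPeriod: aws.Int32(60),
	})
	return err
}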
@@ -252,7 +259,9 @@ type Datapoint struct { // add a unique name/value pair to one of your metrics, you are creating a new // variation of that metric. For example, many Amazon EC2 metrics publish // InstanceId as a dimension name, and the actual instance ID as the value for that -// dimension. You can assign up to 30 dimensions to a metric. +// dimension. +// +// You can assign up to 30 dimensions to a metric. type Dimension struct { // The name of the dimension. Dimension names must contain only ASCII characters, @@ -288,16 +297,17 @@ type DimensionFilter struct { } // This structure contains the definition for a Contributor Insights rule. For -// more information about this rule, see Using Constributor Insights to analyze -// high-cardinality data (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContributorInsights.html) -// in the Amazon CloudWatch User Guide. +// more information about this rule, see[Using Constributor Insights to analyze high-cardinality data] in the Amazon CloudWatch User Guide. +// +// [Using Constributor Insights to analyze high-cardinality data]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContributorInsights.html type InsightRule struct { // The definition of the rule, as a JSON object. The definition contains the // keywords used to define contributors, the value to aggregate on if this rule // returns a sum instead of a count, and the filters. For details on the valid - // syntax, see Contributor Insights Rule Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContributorInsights-RuleSyntax.html) - // . + // syntax, see [Contributor Insights Rule Syntax]. + // + // [Contributor Insights Rule Syntax]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContributorInsights-RuleSyntax.html // // This member is required. Definition *string @@ -319,7 +329,7 @@ type InsightRule struct { // This member is required. State *string - // An optional built-in rule that Amazon Web Services manages. + // An optional built-in rule that Amazon Web Services manages. ManagedRule *bool noSmithyDocumentSerde @@ -327,10 +337,14 @@ type InsightRule struct { // One of the unique contributors found by a Contributor Insights rule. If the // rule contains multiple keys, then a unique contributor is a unique combination -// of values from all the keys in the rule. If the rule contains a single key, then -// each unique contributor is each unique value for this key. For more information, -// see GetInsightRuleReport (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetInsightRuleReport.html) -// . +// of values from all the keys in the rule. +// +// If the rule contains a single key, then each unique contributor is each unique +// value for this key. +// +// For more information, see [GetInsightRuleReport]. +// +// [GetInsightRuleReport]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetInsightRuleReport.html type InsightRuleContributor struct { // An approximation of the aggregate value that comes from this contributor. @@ -353,10 +367,12 @@ type InsightRuleContributor struct { noSmithyDocumentSerde } -// One data point related to one contributor. For more information, see -// GetInsightRuleReport (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetInsightRuleReport.html) -// and InsightRuleContributor (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_InsightRuleContributor.html) -// . +// One data point related to one contributor. 
+// +// For more information, see [GetInsightRuleReport] and [InsightRuleContributor]. +// +// [GetInsightRuleReport]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetInsightRuleReport.html +// [InsightRuleContributor]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_InsightRuleContributor.html type InsightRuleContributorDatapoint struct { // The approximate value that this contributor added during this timestamp. @@ -373,8 +389,11 @@ type InsightRuleContributorDatapoint struct { } // One data point from the metric time series returned in a Contributor Insights -// rule report. For more information, see GetInsightRuleReport (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetInsightRuleReport.html) -// . +// rule report. +// +// For more information, see [GetInsightRuleReport]. +// +// [GetInsightRuleReport]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetInsightRuleReport.html type InsightRuleMetricDatapoint struct { // The timestamp of the data point. @@ -383,37 +402,49 @@ type InsightRuleMetricDatapoint struct { Timestamp *time.Time // The average value from all contributors during the time period represented by - // that data point. This statistic is returned only if you included it in the - // Metrics array in your request. + // that data point. + // + // This statistic is returned only if you included it in the Metrics array in your + // request. Average *float64 // The maximum value provided by one contributor during this timestamp. Each // timestamp is evaluated separately, so the identity of the max contributor could - // be different for each timestamp. This statistic is returned only if you included - // it in the Metrics array in your request. + // be different for each timestamp. + // + // This statistic is returned only if you included it in the Metrics array in your + // request. MaxContributorValue *float64 // The maximum value from a single occurence from a single contributor during the - // time period represented by that data point. This statistic is returned only if - // you included it in the Metrics array in your request. + // time period represented by that data point. + // + // This statistic is returned only if you included it in the Metrics array in your + // request. Maximum *float64 // The minimum value from a single contributor during the time period represented - // by that data point. This statistic is returned only if you included it in the - // Metrics array in your request. + // by that data point. + // + // This statistic is returned only if you included it in the Metrics array in your + // request. Minimum *float64 - // The number of occurrences that matched the rule during this data point. This - // statistic is returned only if you included it in the Metrics array in your + // The number of occurrences that matched the rule during this data point. + // + // This statistic is returned only if you included it in the Metrics array in your // request. SampleCount *float64 // The sum of the values from all contributors during the time period represented - // by that data point. This statistic is returned only if you included it in the - // Metrics array in your request. + // by that data point. + // + // This statistic is returned only if you included it in the Metrics array in your + // request. Sum *float64 // The number of unique contributors who published data during this timestamp. 
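// Example (illustrative sketch): retrieving the datapoint statistics described
// above with GetInsightRuleReport (an operation not shown in this excerpt). The
// rule name is invented; only the statistics named in Metrics are populated on
// each InsightRuleMetricDatapoint. Assumes the fmt and time imports.
func exampleInsightRuleReport(ctx context.Context, client *cloudwatch.Client) error {
	out, err := client.GetInsightRuleReport(ctx, &cloudwatch.GetInsightRuleReportInput{
		RuleName:  aws.String("example-top-talkers"),
		StartTime: aws.Time(time.Now().Add(-1 * time.Hour)),
		EndTime:   aws.Time(time.Now()),
		Period:    aws.Int32(300),
		Metrics:   []string{"Maximum", "SampleCount", "UniqueContributors"},
	})
	if err != nil {
		return err
	}
	for _, dp := range out.MetricDatapoints {
		fmt.Printf("%v max=%.1f samples=%.0f\n",
			aws.ToTime(dp.Timestamp), aws.ToFloat64(dp.Maximum), aws.ToFloat64(dp.SampleCount))
	}
	return nil
}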
+ // // This statistic is returned only if you included it in the Metrics array in your // request. UniqueContributors *float64 @@ -423,10 +454,13 @@ type InsightRuleMetricDatapoint struct { // This structure includes the Timezone parameter, which you can use to specify // your time zone so that the labels that are associated with returned metrics -// display the correct time for your time zone. The Timezone value affects a label -// only if you have a time-based dynamic expression in the label. For more -// information about dynamic expressions in labels, see Using Dynamic Labels (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html) -// . +// display the correct time for your time zone. +// +// The Timezone value affects a label only if you have a time-based dynamic +// expression in the label. For more information about dynamic expressions in +// labels, see [Using Dynamic Labels]. +// +// [Using Dynamic Labels]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html type LabelOptions struct { // The time zone to use for metric data return in this operation. The format is + @@ -439,23 +473,24 @@ type LabelOptions struct { noSmithyDocumentSerde } -// Contains the information that's required to enable a managed Contributor +// Contains the information that's required to enable a managed Contributor +// // Insights rule for an Amazon Web Services resource. type ManagedRule struct { - // The ARN of an Amazon Web Services resource that has managed Contributor + // The ARN of an Amazon Web Services resource that has managed Contributor // Insights rules. // // This member is required. ResourceARN *string - // The template name for the managed Contributor Insights rule, as returned by + // The template name for the managed Contributor Insights rule, as returned by // ListManagedInsightRules . // // This member is required. TemplateName *string - // A list of key-value pairs that you can associate with a managed Contributor + // A list of key-value pairs that you can associate with a managed Contributor // Insights rule. You can associate as many as 50 tags with a rule. Tags can help // you organize and categorize your resources. You also can use them to scope user // permissions by granting a user permission to access or change only the resources @@ -469,20 +504,21 @@ type ManagedRule struct { noSmithyDocumentSerde } -// Contains information about managed Contributor Insights rules, as returned by +// Contains information about managed Contributor Insights rules, as returned by +// // ListManagedInsightRules . type ManagedRuleDescription struct { - // If a managed rule is enabled, this is the ARN for the related Amazon Web + // If a managed rule is enabled, this is the ARN for the related Amazon Web // Services resource. ResourceARN *string - // Describes the state of a managed rule. If present, it contains information + // Describes the state of a managed rule. If present, it contains information // about the Contributor Insights rule that contains information about the related // Amazon Web Services resource. RuleState *ManagedRuleState - // The template name for the managed rule. Used to enable managed rules using + // The template name for the managed rule. Used to enable managed rules using // PutManagedInsightRules . TemplateName *string @@ -492,13 +528,13 @@ type ManagedRuleDescription struct { // The status of a managed Contributor Insights rule. 
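// Example (illustrative sketch): enabling a managed Contributor Insights rule with
// the ManagedRule entries defined above, assuming PutManagedInsightRules accepts
// them through a ManagedRules field. The resource ARN and template name are
// placeholders.
func exampleEnableManagedRule(ctx context.Context, client *cloudwatch.Client) error {
	_, err := client.PutManagedInsightRules(ctx, &cloudwatch.PutManagedInsightRulesInput{
		ManagedRules: []types.ManagedRule{{
			ResourceARN:  aws.String("arn:aws:dynamodb:us-east-1:111122223333:table/example"),
			TemplateName: aws.String("ExampleManagedRuleTemplate"),
		}},
	})
	return err
}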
type ManagedRuleState struct { - // The name of the Contributor Insights rule that contains data for the specified + // The name of the Contributor Insights rule that contains data for the specified // Amazon Web Services resource. // // This member is required. RuleName *string - // Indicates whether the rule is enabled or disabled. + // Indicates whether the rule is enabled or disabled. // // This member is required. State *string @@ -507,6 +543,7 @@ type ManagedRuleState struct { } // A message returned by the GetMetricData API, including a code and a description. +// // If a cross-Region GetMetricData operation fails with a code of Forbidden and a // value of Authentication too complex to retrieve cross region data , you can // correct the problem by running the GetMetricData operation in the same Region @@ -581,9 +618,9 @@ type MetricAlarm struct { // If the value of this field is PARTIAL_DATA , the alarm is being evaluated based // on only partial data. This happens if the query used for the alarm returns more - // than 10,000 metrics. For more information, see Create alarms on Metrics - // Insights queries (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Create_Metrics_Insights_Alarm.html) - // . + // than 10,000 metrics. For more information, see [Create alarms on Metrics Insights queries]. + // + // [Create alarms on Metrics Insights queries]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Create_Metrics_Insights_Alarm.html EvaluationState EvaluationState // The percentile statistic for the metric associated with the alarm. Specify a @@ -601,8 +638,10 @@ type MetricAlarm struct { // An array of MetricDataQuery structures, used in an alarm based on a metric math // expression. Each structure either retrieves a metric or performs a math - // expression. One item in the Metrics array is the math expression that the alarm - // watches. This expression by designated by having ReturnData set to true. + // expression. + // + // One item in the Metrics array is the math expression that the alarm watches. + // This expression by designated by having ReturnData set to true. Metrics []MetricDataQuery // The namespace of the metric associated with the alarm. @@ -643,9 +682,11 @@ type MetricAlarm struct { ThresholdMetricId *string // Sets how this alarm is to handle missing data points. The valid values are - // breaching , notBreaching , ignore , and missing . For more information, see - // Configuring how CloudWatch alarms treat missing data (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data) - // . If this parameter is omitted, the default behavior of missing is used. + // breaching , notBreaching , ignore , and missing . For more information, see [Configuring how CloudWatch alarms treat missing data]. + // + // If this parameter is omitted, the default behavior of missing is used. + // + // [Configuring how CloudWatch alarms treat missing data]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data TreatMissingData *string // The unit of the metric associated with the alarm. @@ -668,25 +709,31 @@ type MetricCharacteristics struct { } // This structure is used in both GetMetricData and PutMetricAlarm . The supported -// use of this structure is different for those two operations. 
When used in -// GetMetricData , it indicates the metric data to return, and whether this call is -// just retrieving a batch set of data for one metric, or is performing a Metrics -// Insights query or a math expression. A single GetMetricData call can include up -// to 500 MetricDataQuery structures. When used in PutMetricAlarm , it enables you -// to create an alarm based on a metric math expression. Each MetricDataQuery in -// the array specifies either a metric to retrieve, or a math expression to be -// performed on retrieved metrics. A single PutMetricAlarm call can include up to -// 20 MetricDataQuery structures in the array. The 20 structures can include as -// many as 10 structures that contain a MetricStat parameter to retrieve a metric, -// and as many as 10 structures that contain the Expression parameter to perform a -// math expression. Of those Expression structures, one must have true as the -// value for ReturnData . The result of this expression is the value the alarm -// watches. Any expression used in a PutMetricAlarm operation must return a single -// time series. For more information, see Metric Math Syntax and Functions (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/using-metric-math.html#metric-math-syntax) -// in the Amazon CloudWatch User Guide. Some of the parameters of this structure -// also have different uses whether you are using this structure in a GetMetricData -// operation or a PutMetricAlarm operation. These differences are explained in the -// following parameter list. +// use of this structure is different for those two operations. +// +// When used in GetMetricData , it indicates the metric data to return, and whether +// this call is just retrieving a batch set of data for one metric, or is +// performing a Metrics Insights query or a math expression. A single GetMetricData +// call can include up to 500 MetricDataQuery structures. +// +// When used in PutMetricAlarm , it enables you to create an alarm based on a +// metric math expression. Each MetricDataQuery in the array specifies either a +// metric to retrieve, or a math expression to be performed on retrieved metrics. A +// single PutMetricAlarm call can include up to 20 MetricDataQuery structures in +// the array. The 20 structures can include as many as 10 structures that contain a +// MetricStat parameter to retrieve a metric, and as many as 10 structures that +// contain the Expression parameter to perform a math expression. Of those +// Expression structures, one must have true as the value for ReturnData . The +// result of this expression is the value the alarm watches. +// +// Any expression used in a PutMetricAlarm operation must return a single time +// series. For more information, see [Metric Math Syntax and Functions]in the Amazon CloudWatch User Guide. +// +// Some of the parameters of this structure also have different uses whether you +// are using this structure in a GetMetricData operation or a PutMetricAlarm +// operation. These differences are explained in the following parameter list. +// +// [Metric Math Syntax and Functions]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/using-metric-math.html#metric-math-syntax type MetricDataQuery struct { // A short name used to tie this object to the results in the response. This name @@ -699,37 +746,48 @@ type MetricDataQuery struct { // This member is required. Id *string - // The ID of the account where the metrics are located. 
If you are performing a - // GetMetricData operation in a monitoring account, use this to specify which - // account to retrieve this metric from. If you are performing a PutMetricAlarm - // operation, use this to specify which account contains the metric that the alarm - // is watching. + // The ID of the account where the metrics are located. + // + // If you are performing a GetMetricData operation in a monitoring account, use + // this to specify which account to retrieve this metric from. + // + // If you are performing a PutMetricAlarm operation, use this to specify which + // account contains the metric that the alarm is watching. AccountId *string // This field can contain either a Metrics Insights query, or a metric math // expression to be performed on the returned data. For more information about - // Metrics Insights queries, see Metrics Insights query components and syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch-metrics-insights-querylanguage) - // in the Amazon CloudWatch User Guide. A math expression can use the Id of the - // other metrics or queries to refer to those metrics, and can also use the Id of - // other expressions to use the result of those expressions. For more information - // about metric math expressions, see Metric Math Syntax and Functions (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/using-metric-math.html#metric-math-syntax) - // in the Amazon CloudWatch User Guide. Within each MetricDataQuery object, you - // must specify either Expression or MetricStat but not both. + // Metrics Insights queries, see [Metrics Insights query components and syntax]in the Amazon CloudWatch User Guide. + // + // A math expression can use the Id of the other metrics or queries to refer to + // those metrics, and can also use the Id of other expressions to use the result + // of those expressions. For more information about metric math expressions, see [Metric Math Syntax and Functions] + // in the Amazon CloudWatch User Guide. + // + // Within each MetricDataQuery object, you must specify either Expression or + // MetricStat but not both. + // + // [Metrics Insights query components and syntax]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch-metrics-insights-querylanguage + // [Metric Math Syntax and Functions]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/using-metric-math.html#metric-math-syntax Expression *string // A human-readable label for this metric or expression. This is especially useful // if this is an expression, so that you know what the value represents. If the // metric or expression is shown in a CloudWatch dashboard widget, the label is - // shown. If Label is omitted, CloudWatch generates a default. You can put dynamic - // expressions into a label, so that it is more descriptive. For more information, - // see Using Dynamic Labels (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html) - // . + // shown. If Label is omitted, CloudWatch generates a default. + // + // You can put dynamic expressions into a label, so that it is more descriptive. + // For more information, see [Using Dynamic Labels]. + // + // [Using Dynamic Labels]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html Label *string // The metric to be returned, along with statistics, period, and units. Use this // parameter only if this object is retrieving a metric and not performing a math - // expression on returned data. 
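//
// A minimal GetMetricData sketch combining a MetricStat query and an
// Expression query (each MetricDataQuery sets exactly one of the two). It
// assumes a configured *cloudwatch.Client, a one-hour window, and the aws,
// cloudwatch, and cloudwatch/types packages plus "context" and "time"; the
// namespace, metric name, dimension, and expression are illustrative only:
//
//	func queueDepth(ctx context.Context, cw *cloudwatch.Client) (*cloudwatch.GetMetricDataOutput, error) {
//		end := time.Now()
//		return cw.GetMetricData(ctx, &cloudwatch.GetMetricDataInput{
//			StartTime: aws.Time(end.Add(-1 * time.Hour)),
//			EndTime:   aws.Time(end),
//			MetricDataQueries: []types.MetricDataQuery{
//				{
//					Id: aws.String("m1"),
//					MetricStat: &types.MetricStat{
//						Metric: &types.Metric{
//							Namespace:  aws.String("AWS/SQS"),
//							MetricName: aws.String("ApproximateNumberOfMessagesVisible"),
//							Dimensions: []types.Dimension{
//								{Name: aws.String("QueueName"), Value: aws.String("my-queue")},
//							},
//						},
//						Period: aws.Int32(60),
//						Stat:   aws.String("Average"),
//					},
//				},
//				{
//					Id:         aws.String("e1"),
//					Expression: aws.String("FILL(m1, 0)"),
//					Label:      aws.String("queue depth (gaps filled)"),
//				},
//			},
//		})
//	}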
Within one MetricDataQuery object, you must specify - // either Expression or MetricStat but not both. + // expression on returned data. + // + // Within one MetricDataQuery object, you must specify either Expression or + // MetricStat but not both. MetricStat *MetricStat // The granularity, in seconds, of the returned data points. For metrics with @@ -743,9 +801,10 @@ type MetricDataQuery struct { // When used in GetMetricData , this option indicates whether to return the // timestamps and raw data values of this metric. If you are performing this call // just to do math expressions and do not also need the raw data returned, you can - // specify false . If you omit this, the default of true is used. When used in - // PutMetricAlarm , specify true for the one expression result to use as the - // alarm. For all other metrics and expressions in the same PutMetricAlarm + // specify false . If you omit this, the default of true is used. + // + // When used in PutMetricAlarm , specify true for the one expression result to use + // as the alarm. For all other metrics and expressions in the same PutMetricAlarm // operation, specify ReturnData as False. ReturnData *bool @@ -798,9 +857,11 @@ type MetricDatum struct { // Array of numbers that is used along with the Values array. Each number in the // Count array is the number of times the corresponding value in the Values array - // occurred during the period. If you omit the Counts array, the default of 1 is - // used as the value for each count. If you include a Counts array, it must - // include the same amount of values as the Values array. + // occurred during the period. + // + // If you omit the Counts array, the default of 1 is used as the value for each + // count. If you include a Counts array, it must include the same amount of values + // as the Values array. Counts []float64 // The dimensions associated with the metric. @@ -814,9 +875,12 @@ type MetricDatum struct { // resolution down to one second. Setting this to 60 specifies this metric as a // regular-resolution metric, which CloudWatch stores at 1-minute resolution. // Currently, high resolution is available only for custom metrics. For more - // information about high-resolution metrics, see High-Resolution Metrics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html#high-resolution-metrics) - // in the Amazon CloudWatch User Guide. This field is optional, if you do not - // specify it the default of 60 is used. + // information about high-resolution metrics, see [High-Resolution Metrics]in the Amazon CloudWatch User + // Guide. + // + // This field is optional, if you do not specify it the default of 60 is used. + // + // [High-Resolution Metrics]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html#high-resolution-metrics StorageResolution *int32 // The time the metric data was received, expressed as the number of milliseconds @@ -824,24 +888,29 @@ type MetricDatum struct { Timestamp *time.Time // When you are using a Put operation, this defines what unit you want to use when - // storing the metric. In a Get operation, this displays the unit that is used for - // the metric. + // storing the metric. + // + // In a Get operation, this displays the unit that is used for the metric. Unit StandardUnit - // The value for the metric. Although the parameter accepts numbers of type - // Double, CloudWatch rejects values that are either too small or too large. Values - // must be in the range of -2^360 to 2^360. 
In addition, special values (for - // example, NaN, +Infinity, -Infinity) are not supported. + // The value for the metric. + // + // Although the parameter accepts numbers of type Double, CloudWatch rejects + // values that are either too small or too large. Values must be in the range of + // -2^360 to 2^360. In addition, special values (for example, NaN, +Infinity, + // -Infinity) are not supported. Value *float64 // Array of numbers representing the values for the metric during the period. Each // unique value is listed just once in this array, and the corresponding number in // the Counts array specifies the number of times that value occurred during the // period. You can include up to 150 unique values in each PutMetricData action - // that specifies a Values array. Although the Values array accepts numbers of - // type Double , CloudWatch rejects values that are either too small or too large. - // Values must be in the range of -2^360 to 2^360. In addition, special values (for - // example, NaN, +Infinity, -Infinity) are not supported. + // that specifies a Values array. + // + // Although the Values array accepts numbers of type Double , CloudWatch rejects + // values that are either too small or too large. Values must be in the range of + // -2^360 to 2^360. In addition, special values (for example, NaN, +Infinity, + // -Infinity) are not supported. Values []float64 noSmithyDocumentSerde @@ -879,13 +948,18 @@ type MetricStat struct { // be a multiple of 60. For high-resolution metrics that are collected at intervals // of less than one minute, the period can be 1, 5, 10, 30, 60, or any multiple of // 60. High-resolution metrics are those metrics stored by a PutMetricData call - // that includes a StorageResolution of 1 second. If the StartTime parameter - // specifies a time stamp that is greater than 3 hours ago, you must specify the - // period as follows or no data points in that time range is returned: + // that includes a StorageResolution of 1 second. + // + // If the StartTime parameter specifies a time stamp that is greater than 3 hours + // ago, you must specify the period as follows or no data points in that time range + // is returned: + // // - Start time between 3 hours and 15 days ago - Use a multiple of 60 seconds // (1 minute). + // // - Start time between 15 and 63 days ago - Use a multiple of 300 seconds (5 // minutes). + // // - Start time greater than 63 days ago - Use a multiple of 3600 seconds (1 // hour). // @@ -899,12 +973,14 @@ type MetricStat struct { Stat *string // When you are using a Put operation, this defines what unit you want to use when - // storing the metric. In a Get operation, if you omit Unit then all data that was - // collected with any unit is returned, along with the corresponding units that - // were specified when the data was reported to CloudWatch. If you specify a unit, - // the operation returns only data that was collected with that unit specified. If - // you specify a unit that does not match the data collected, the results of the - // operation are null. CloudWatch does not perform unit conversions. + // storing the metric. + // + // In a Get operation, if you omit Unit then all data that was collected with any + // unit is returned, along with the corresponding units that were specified when + // the data was reported to CloudWatch. If you specify a unit, the operation + // returns only data that was collected with that unit specified. 
If you specify a + // unit that does not match the data collected, the results of the operation are + // null. CloudWatch does not perform unit conversions. Unit StandardUnit noSmithyDocumentSerde @@ -940,24 +1016,29 @@ type MetricStreamEntry struct { } // This structure contains a metric namespace and optionally, a list of metric -// names, to either include in a metric stream or exclude from a metric stream. A -// metric stream's filters can include up to 1000 total names. This limit applies -// to the sum of namespace names and metric names in the filters. For example, this -// could include 10 metric namespace filters with 99 metrics each, or 20 namespace -// filters with 49 metrics specified in each filter. +// names, to either include in a metric stream or exclude from a metric stream. +// +// A metric stream's filters can include up to 1000 total names. This limit +// applies to the sum of namespace names and metric names in the filters. For +// example, this could include 10 metric namespace filters with 99 metrics each, or +// 20 namespace filters with 49 metrics specified in each filter. type MetricStreamFilter struct { // The names of the metrics to either include or exclude from the metric stream. + // // If you omit this parameter, all metrics in the namespace are included or // excluded, depending on whether this filter is specified as an exclude filter or - // an include filter. Each metric name can contain only ASCII printable characters - // (ASCII range 32 through 126). Each metric name must contain at least one - // non-whitespace character. + // an include filter. + // + // Each metric name can contain only ASCII printable characters (ASCII range 32 + // through 126). Each metric name must contain at least one non-whitespace + // character. MetricNames []string - // The name of the metric namespace for this filter. The namespace can contain - // only ASCII printable characters (ASCII range 32 through 126). It must contain at - // least one non-whitespace character. + // The name of the metric namespace for this filter. + // + // The namespace can contain only ASCII printable characters (ASCII range 32 + // through 126). It must contain at least one non-whitespace character. Namespace *string noSmithyDocumentSerde @@ -966,28 +1047,33 @@ type MetricStreamFilter struct { // By default, a metric stream always sends the MAX , MIN , SUM , and SAMPLECOUNT // statistics for each metric that is streamed. This structure contains information // for one metric that includes additional statistics in the stream. For more -// information about statistics, see CloudWatch, listed in CloudWatch statistics -// definitions (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Statistics-definitions.html.html) -// . +// information about statistics, see CloudWatch, listed in [CloudWatch statistics definitions]. +// +// [CloudWatch statistics definitions]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Statistics-definitions.html.html type MetricStreamStatisticsConfiguration struct { // The list of additional statistics that are to be streamed for the metrics // listed in the IncludeMetrics array in this structure. This list can include as - // many as 20 statistics. If the OutputFormat for the stream is opentelemetry1.0 - // or opentelemetry0.7 , the only valid values are p?? percentile statistics such - // as p90 , p99 and so on. 
If the OutputFormat for the stream is json , the valid - // values include the abbreviations for all of the statistics listed in CloudWatch - // statistics definitions (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Statistics-definitions.html.html) - // . For example, this includes tm98, wm90 , PR(:300) , and so on. + // many as 20 statistics. + // + // If the OutputFormat for the stream is opentelemetry1.0 or opentelemetry0.7 , the + // only valid values are p?? percentile statistics such as p90 , p99 and so on. + // + // If the OutputFormat for the stream is json , the valid values include the + // abbreviations for all of the statistics listed in [CloudWatch statistics definitions]. For example, this includes + // tm98, wm90 , PR(:300) , and so on. + // + // [CloudWatch statistics definitions]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Statistics-definitions.html.html // // This member is required. AdditionalStatistics []string // An array of metric name and namespace pairs that stream the additional // statistics listed in the value of the AdditionalStatistics parameter. There can - // be as many as 100 pairs in the array. All metrics that match the combination of - // metric name and namespace will be streamed with the additional statistics, no - // matter their dimensions. + // be as many as 100 pairs in the array. + // + // All metrics that match the combination of metric name and namespace will be + // streamed with the additional statistics, no matter their dimensions. // // This member is required. IncludeMetrics []MetricStreamStatisticsMetric diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/CHANGELOG.md index 0c2a6379356..9f46382c554 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/CHANGELOG.md @@ -1,3 +1,77 @@ +# v1.34.4 (2024-07-24) + +* **Documentation**: DynamoDB doc only update for July + +# v1.34.3 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.34.2 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.34.1 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.34.0 (2024-06-26) + +* **Feature**: Support list-of-string endpoint parameter. + +# v1.33.2 (2024-06-20) + +* **Documentation**: Doc-only update for DynamoDB. Fixed Important note in 6 Global table APIs - CreateGlobalTable, DescribeGlobalTable, DescribeGlobalTableSettings, ListGlobalTables, UpdateGlobalTable, and UpdateGlobalTableSettings. + +# v1.33.1 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.33.0 (2024-06-18) + +* **Feature**: Track usage of various AWS SDK features in user-agent string. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.9 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.8 (2024-06-07) + +* **Bug Fix**: Add clock skew correction on all service clients +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.7 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.6 (2024-05-28) + +* **Documentation**: Doc-only update for DynamoDB. Specified the IAM actions needed to authorize a user to create a table with a resource-based policy. 
+ +# v1.32.5 (2024-05-24) + +* **Documentation**: Documentation only updates for DynamoDB. + +# v1.32.4 (2024-05-23) + +* No change notes available for this release. + +# v1.32.3 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.2 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.1 (2024-05-08) + +* **Bug Fix**: GoDoc improvement + +# v1.32.0 (2024-05-02) + +* **Feature**: This release adds support to specify an optional, maximum OnDemandThroughput for DynamoDB tables and global secondary indexes in the CreateTable or UpdateTable APIs. You can also override the OnDemandThroughput settings by calling the ImportTable, RestoreFromPointInTime, or RestoreFromBackup APIs. + # v1.31.1 (2024-03-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_client.go index bd7f2a2e2fd..2bb5801fa21 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_client.go @@ -15,10 +15,12 @@ import ( internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" ddbcust "github.com/aws/aws-sdk-go-v2/service/dynamodb/internal/customizations" acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding" internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" smithydocument "github.com/aws/smithy-go/document" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/middleware" @@ -28,6 +30,7 @@ import ( "net/http" "net/url" "strings" + "sync/atomic" "time" ) @@ -40,6 +43,9 @@ type Client struct { // cache used to store discovered endpoints endpointCache *internalEndpointDiscovery.EndpointCache + + // Difference between the time reported by the server and the client + timeOffset *atomic.Int64 } // New returns an initialized Client based on the functional options. Provide @@ -84,6 +90,8 @@ func New(options Options, optFns ...func(*Options)) *Client { resolveEndpointCache(client) + initializeTimeOffsetResolver(client) + return client } @@ -245,15 +253,16 @@ func setResolvedDefaultsMode(o *Options) { // NewFromConfig returns a new client from the provided config. 
func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + AccountIDEndpointMode: cfg.AccountIDEndpointMode, } resolveAWSRetryerProvider(cfg, &opts) resolveAWSRetryMaxAttempts(cfg, &opts) @@ -458,6 +467,30 @@ func addContentSHA256Header(stack *middleware.Stack) error { return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) } +func addIsWaiterUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) + return nil + }) +} + +func addIsPaginatorUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) + return nil + }) +} + func resolveIdempotencyTokenProvider(o *Options) { if o.IdempotencyTokenProvider != nil { return @@ -590,6 +623,63 @@ func (c *Client) handleEndpointDiscoveryFromService(ctx context.Context, input * return endpoint, nil } +func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { + if mode == aws.AccountIDEndpointModeDisabled { + return nil + } + + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { + return aws.String(ca.Credentials.AccountID) + } + + return nil +} + +func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { + mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} + if err := stack.Build.Add(&mw, middleware.After); err != nil { + return err + } + return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) +} +func initializeTimeOffsetResolver(c *Client) { + c.timeOffset = new(atomic.Int64) +} + +func checkAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) error { + switch mode { + case aws.AccountIDEndpointModeUnset: + case aws.AccountIDEndpointModePreferred: + case aws.AccountIDEndpointModeDisabled: + case aws.AccountIDEndpointModeRequired: + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); !ok { + return fmt.Errorf("accountID is required but not set") + } else if ca.Credentials.AccountID == "" { + return fmt.Errorf("accountID is required but not set") + } + // default check in case invalid mode is configured through request config + default: + return fmt.Errorf("invalid accountID endpoint mode %s, must be preferred/required/disabled", mode) + } + + return nil +} + +func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + switch options.Retryer.(type) { + case *retry.Standard: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) + case *retry.AdaptiveMode: + 
ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) + } + return nil +} + // IdempotencyTokenProvider interface for providing idempotency token type IdempotencyTokenProvider interface { GetIdempotencyToken() (string, error) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchExecuteStatement.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchExecuteStatement.go index d33688b2630..633e64ec5ba 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchExecuteStatement.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchExecuteStatement.go @@ -14,12 +14,18 @@ import ( // This operation allows you to perform batch reads or writes on data stored in // DynamoDB, using PartiQL. Each read statement in a BatchExecuteStatement must // specify an equality condition on all key attributes. This enforces that each -// SELECT statement in a batch returns at most a single item. The entire batch must -// consist of either read statements or write statements, you cannot mix both in -// one batch. A HTTP 200 response does not mean that all statements in the +// SELECT statement in a batch returns at most a single item. For more information, +// see [Running batch operations with PartiQL for DynamoDB]. +// +// The entire batch must consist of either read statements or write statements, +// you cannot mix both in one batch. +// +// A HTTP 200 response does not mean that all statements in the // BatchExecuteStatement succeeded. Error details for individual statements can be -// found under the Error (https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_BatchStatementResponse.html#DDB-Type-BatchStatementResponse-Error) -// field of the BatchStatementResponse for each statement. +// found under the [Error]field of the BatchStatementResponse for each statement. +// +// [Error]: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_BatchStatementResponse.html#DDB-Type-BatchStatementResponse-Error +// [Running batch operations with PartiQL for DynamoDB]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ql-reference.multiplestatements.batching.html func (c *Client) BatchExecuteStatement(ctx context.Context, params *BatchExecuteStatementInput, optFns ...func(*Options)) (*BatchExecuteStatementOutput, error) { if params == nil { params = &BatchExecuteStatementInput{} @@ -44,13 +50,18 @@ type BatchExecuteStatementInput struct { // Determines the level of detail about either provisioned or on-demand throughput // consumption that is returned in the response: + // // - INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary index - // that was accessed. Note that some operations, such as GetItem and BatchGetItem - // , do not access any indexes at all. In these cases, specifying INDEXES will - // only return ConsumedCapacity information for table(s). + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem , do not access any + // indexes at all. In these cases, specifying INDEXES will only return + // ConsumedCapacity information for table(s). + // // - TOTAL - The response includes only the aggregate ConsumedCapacity for the // operation. + // // - NONE - No ConsumedCapacity details are included in the response. 
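//
// A sketch of a PartiQL batch read against a hypothetical "Music" table keyed
// by "Artist", assuming a configured *dynamodb.Client named db and the aws,
// dynamodb, and dynamodb/types packages plus "context" and "log"; because an
// HTTP 200 does not mean every statement succeeded, the per-statement Error
// field is checked on each response:
//
//	func readArtists(ctx context.Context, db *dynamodb.Client, artists []string) error {
//		stmts := make([]types.BatchStatementRequest, 0, len(artists))
//		for _, a := range artists {
//			stmts = append(stmts, types.BatchStatementRequest{
//				Statement:  aws.String(`SELECT * FROM "Music" WHERE "Artist" = ?`),
//				Parameters: []types.AttributeValue{&types.AttributeValueMemberS{Value: a}},
//			})
//		}
//		out, err := db.BatchExecuteStatement(ctx, &dynamodb.BatchExecuteStatementInput{
//			Statements:             stmts,
//			ReturnConsumedCapacity: types.ReturnConsumedCapacityTotal,
//		})
//		if err != nil {
//			return err
//		}
//		for _, resp := range out.Responses {
//			if resp.Error != nil {
//				// This statement failed even though the call returned 200.
//				log.Printf("statement failed: %v %s", resp.Error.Code, aws.ToString(resp.Error.Message))
//			}
//		}
//		return nil
//	}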
ReturnConsumedCapacity types.ReturnConsumedCapacity @@ -128,6 +139,12 @@ func (c *Client) addOperationBatchExecuteStatementMiddlewares(stack *middleware. if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpBatchExecuteStatementValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchGetItem.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchGetItem.go index bd39f8d3447..d8581acb831 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchGetItem.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchGetItem.go @@ -13,43 +13,57 @@ import ( ) // The BatchGetItem operation returns the attributes of one or more items from one -// or more tables. You identify requested items by primary key. A single operation -// can retrieve up to 16 MB of data, which can contain as many as 100 items. -// BatchGetItem returns a partial result if the response size limit is exceeded, -// the table's provisioned throughput is exceeded, more than 1MB per partition is -// requested, or an internal processing failure occurs. If a partial result is -// returned, the operation returns a value for UnprocessedKeys . You can use this -// value to retry the operation starting with the next item to get. If you request -// more than 100 items, BatchGetItem returns a ValidationException with the -// message "Too many items requested for the BatchGetItem call." For example, if -// you ask to retrieve 100 items, but each individual item is 300 KB in size, the -// system returns 52 items (so as not to exceed the 16 MB limit). It also returns -// an appropriate UnprocessedKeys value so you can get the next page of results. -// If desired, your application can include its own logic to assemble the pages of -// results into one dataset. If none of the items can be processed due to -// insufficient provisioned throughput on all of the tables in the request, then -// BatchGetItem returns a ProvisionedThroughputExceededException . If at least one -// of the items is successfully processed, then BatchGetItem completes -// successfully, while returning the keys of the unread items in UnprocessedKeys . +// or more tables. You identify requested items by primary key. +// +// A single operation can retrieve up to 16 MB of data, which can contain as many +// as 100 items. BatchGetItem returns a partial result if the response size limit +// is exceeded, the table's provisioned throughput is exceeded, more than 1MB per +// partition is requested, or an internal processing failure occurs. If a partial +// result is returned, the operation returns a value for UnprocessedKeys . You can +// use this value to retry the operation starting with the next item to get. +// +// If you request more than 100 items, BatchGetItem returns a ValidationException +// with the message "Too many items requested for the BatchGetItem call." +// +// For example, if you ask to retrieve 100 items, but each individual item is 300 +// KB in size, the system returns 52 items (so as not to exceed the 16 MB limit). +// It also returns an appropriate UnprocessedKeys value so you can get the next +// page of results. If desired, your application can include its own logic to +// assemble the pages of results into one dataset. 
+// +// If none of the items can be processed due to insufficient provisioned +// throughput on all of the tables in the request, then BatchGetItem returns a +// ProvisionedThroughputExceededException . If at least one of the items is +// successfully processed, then BatchGetItem completes successfully, while +// returning the keys of the unread items in UnprocessedKeys . +// // If DynamoDB returns any unprocessed items, you should retry the batch operation // on those items. However, we strongly recommend that you use an exponential // backoff algorithm. If you retry the batch operation immediately, the underlying // read or write requests can still fail due to throttling on the individual // tables. If you delay the batch operation using exponential backoff, the -// individual requests in the batch are much more likely to succeed. For more -// information, see Batch Operations and Error Handling (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#BatchOperations) -// in the Amazon DynamoDB Developer Guide. By default, BatchGetItem performs -// eventually consistent reads on every table in the request. If you want strongly -// consistent reads instead, you can set ConsistentRead to true for any or all -// tables. In order to minimize response latency, BatchGetItem may retrieve items -// in parallel. When designing your application, keep in mind that DynamoDB does -// not return items in any particular order. To help parse the response by item, -// include the primary key values for the items in your request in the -// ProjectionExpression parameter. If a requested item does not exist, it is not -// returned in the result. Requests for nonexistent items consume the minimum read -// capacity units according to the type of read. For more information, see Working -// with Tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#CapacityUnitCalculations) -// in the Amazon DynamoDB Developer Guide. +// individual requests in the batch are much more likely to succeed. +// +// For more information, see [Batch Operations and Error Handling] in the Amazon DynamoDB Developer Guide. +// +// By default, BatchGetItem performs eventually consistent reads on every table in +// the request. If you want strongly consistent reads instead, you can set +// ConsistentRead to true for any or all tables. +// +// In order to minimize response latency, BatchGetItem may retrieve items in +// parallel. +// +// When designing your application, keep in mind that DynamoDB does not return +// items in any particular order. To help parse the response by item, include the +// primary key values for the items in your request in the ProjectionExpression +// parameter. +// +// If a requested item does not exist, it is not returned in the result. Requests +// for nonexistent items consume the minimum read capacity units according to the +// type of read. For more information, see [Working with Tables]in the Amazon DynamoDB Developer Guide. 
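//
// A sketch of the retry loop described above for a hypothetical "Music"
// table, assuming a configured *dynamodb.Client named db, no more than 100
// keys per call, and the aws-sdk-go-v2 dynamodb and dynamodb/types packages
// plus "context" and "time"; the backoff policy is illustrative, and real
// code should also cap the number of attempts:
//
//	func getAll(ctx context.Context, db *dynamodb.Client, keys []map[string]types.AttributeValue) ([]map[string]types.AttributeValue, error) {
//		var items []map[string]types.AttributeValue
//		pending := map[string]types.KeysAndAttributes{"Music": {Keys: keys}}
//		for attempt := 0; len(pending) > 0; attempt++ {
//			out, err := db.BatchGetItem(ctx, &dynamodb.BatchGetItemInput{RequestItems: pending})
//			if err != nil {
//				return nil, err
//			}
//			items = append(items, out.Responses["Music"]...)
//			// UnprocessedKeys is empty once every requested item has been read.
//			pending = out.UnprocessedKeys
//			if len(pending) > 0 {
//				time.Sleep(time.Duration(1<<attempt) * 100 * time.Millisecond) // exponential backoff
//			}
//		}
//		return items, nil
//	}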
+// +// [Batch Operations and Error Handling]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#BatchOperations +// [Working with Tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#CapacityUnitCalculations func (c *Client) BatchGetItem(ctx context.Context, params *BatchGetItemInput, optFns ...func(*Options)) (*BatchGetItemOutput, error) { if params == nil { params = &BatchGetItemInput{} @@ -70,59 +84,87 @@ type BatchGetItemInput struct { // A map of one or more table names or table ARNs and, for each table, a map that // describes one or more items to retrieve from that table. Each table name or ARN - // can be used only once per BatchGetItem request. Each element in the map of - // items to retrieve consists of the following: + // can be used only once per BatchGetItem request. + // + // Each element in the map of items to retrieve consists of the following: + // // - ConsistentRead - If true , a strongly consistent read is used; if false (the // default), an eventually consistent read is used. + // // - ExpressionAttributeNames - One or more substitution tokens for attribute // names in the ProjectionExpression parameter. The following are some use cases // for using ExpressionAttributeNames : + // // - To access an attribute whose name conflicts with a DynamoDB reserved word. + // // - To create a placeholder for repeating occurrences of an attribute name in // an expression. + // // - To prevent special characters in an attribute name from being - // misinterpreted in an expression. Use the # character in an expression to - // dereference an attribute name. For example, consider the following attribute - // name: - // - Percentile The name of this attribute conflicts with a reserved word, so it - // cannot be used directly in an expression. (For the complete list of reserved - // words, see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) - // in the Amazon DynamoDB Developer Guide). To work around this, you could specify - // the following for ExpressionAttributeNames : - // - {"#P":"Percentile"} You could then use this substitution in an expression, - // as in this example: - // - #P = :val Tokens that begin with the : character are expression attribute - // values, which are placeholders for the actual value at runtime. For more - // information about expression attribute names, see Accessing Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. + // misinterpreted in an expression. + // + // Use the # character in an expression to dereference an attribute name. For + // example, consider the following attribute name: + // + // - Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot be used + // directly in an expression. (For the complete list of reserved words, see [Reserved Words]in + // the Amazon DynamoDB Developer Guide). To work around this, you could specify the + // following for ExpressionAttributeNames : + // + // - {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // - #P = :val + // + // Tokens that begin with the : character are expression attribute values, which + // are placeholders for the actual value at runtime. 
+ // + // For more information about expression attribute names, see [Accessing Item Attributes]in the Amazon + // DynamoDB Developer Guide. + // // - Keys - An array of primary key attribute values that define specific items // in the table. For each primary key, you must provide all of the key attributes. // For example, with a simple primary key, you only need to provide the partition // key value. For a composite key, you must provide both the partition key value // and the sort key value. + // // - ProjectionExpression - A string that identifies one or more attributes to // retrieve from the table. These attributes can include scalars, sets, or elements // of a JSON document. The attributes in the expression must be separated by - // commas. If no attribute names are specified, then all attributes are returned. - // If any of the requested attributes are not found, they do not appear in the - // result. For more information, see Accessing Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. + // commas. + // + // If no attribute names are specified, then all attributes are returned. If any + // of the requested attributes are not found, they do not appear in the result. + // + // For more information, see [Accessing Item Attributes]in the Amazon DynamoDB Developer Guide. + // // - AttributesToGet - This is a legacy parameter. Use ProjectionExpression - // instead. For more information, see AttributesToGet (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html) - // in the Amazon DynamoDB Developer Guide. + // instead. For more information, see [AttributesToGet]in the Amazon DynamoDB Developer Guide. + // + // [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html + // [Accessing Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html + // [AttributesToGet]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html // // This member is required. RequestItems map[string]types.KeysAndAttributes // Determines the level of detail about either provisioned or on-demand throughput // consumption that is returned in the response: + // // - INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary index - // that was accessed. Note that some operations, such as GetItem and BatchGetItem - // , do not access any indexes at all. In these cases, specifying INDEXES will - // only return ConsumedCapacity information for table(s). + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem , do not access any + // indexes at all. In these cases, specifying INDEXES will only return + // ConsumedCapacity information for table(s). + // // - TOTAL - The response includes only the aggregate ConsumedCapacity for the // operation. + // // - NONE - No ConsumedCapacity details are included in the response. ReturnConsumedCapacity types.ReturnConsumedCapacity @@ -132,9 +174,12 @@ type BatchGetItemInput struct { // Represents the output of a BatchGetItem operation. type BatchGetItemOutput struct { - // The read capacity units consumed by the entire BatchGetItem operation. 
Each - // element consists of: + // The read capacity units consumed by the entire BatchGetItem operation. + // + // Each element consists of: + // // - TableName - The table that consumed the provisioned throughput. + // // - CapacityUnits - The total number of capacity units consumed. ConsumedCapacity []types.ConsumedCapacity @@ -146,16 +191,21 @@ type BatchGetItemOutput struct { // A map of tables and their respective keys that were not processed with the // current response. The UnprocessedKeys value is in the same form as RequestItems // , so the value can be provided directly to a subsequent BatchGetItem operation. - // For more information, see RequestItems in the Request Parameters section. Each - // element consists of: + // For more information, see RequestItems in the Request Parameters section. + // + // Each element consists of: + // // - Keys - An array of primary key attribute values that define specific items // in the table. + // // - ProjectionExpression - One or more attributes to be retrieved from the table // or index. By default, all attributes are returned. If a requested attribute is // not found, it does not appear in the result. + // // - ConsistentRead - The consistency of a read operation. If set to true , then // a strongly consistent read is used; otherwise, an eventually consistent read is // used. + // // If there are no unprocessed keys remaining, the response contains an empty // UnprocessedKeys map. UnprocessedKeys map[string]types.KeysAndAttributes @@ -224,6 +274,12 @@ func (c *Client) addOperationBatchGetItemMiddlewares(stack *middleware.Stack, op if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpBatchGetItemValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchWriteItem.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchWriteItem.go index cae206a0a91..2e23a47c0b9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchWriteItem.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchWriteItem.go @@ -17,57 +17,85 @@ import ( // the network, consisting of up to 25 item put or delete operations. While // individual items can be up to 400 KB once stored, it's important to note that an // item's representation might be greater than 400KB while being sent in DynamoDB's -// JSON format for the API call. For more details on this distinction, see Naming -// Rules and Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html) -// . BatchWriteItem cannot update items. If you perform a BatchWriteItem operation +// JSON format for the API call. For more details on this distinction, see [Naming Rules and Data Types]. +// +// BatchWriteItem cannot update items. If you perform a BatchWriteItem operation // on an existing item, that item's values will be overwritten by the operation and // it will appear like it was updated. To update items, we recommend you use the -// UpdateItem action. The individual PutItem and DeleteItem operations specified -// in BatchWriteItem are atomic; however BatchWriteItem as a whole is not. 
If any -// requested operations fail because the table's provisioned throughput is exceeded -// or an internal processing failure occurs, the failed operations are returned in -// the UnprocessedItems response parameter. You can investigate and optionally -// resend the requests. Typically, you would call BatchWriteItem in a loop. Each +// UpdateItem action. +// +// The individual PutItem and DeleteItem operations specified in BatchWriteItem +// are atomic; however BatchWriteItem as a whole is not. If any requested +// operations fail because the table's provisioned throughput is exceeded or an +// internal processing failure occurs, the failed operations are returned in the +// UnprocessedItems response parameter. You can investigate and optionally resend +// the requests. Typically, you would call BatchWriteItem in a loop. Each // iteration would check for unprocessed items and submit a new BatchWriteItem -// request with those unprocessed items until all items have been processed. If -// none of the items can be processed due to insufficient provisioned throughput on -// all of the tables in the request, then BatchWriteItem returns a -// ProvisionedThroughputExceededException . If DynamoDB returns any unprocessed -// items, you should retry the batch operation on those items. However, we strongly -// recommend that you use an exponential backoff algorithm. If you retry the batch -// operation immediately, the underlying read or write requests can still fail due -// to throttling on the individual tables. If you delay the batch operation using -// exponential backoff, the individual requests in the batch are much more likely -// to succeed. For more information, see Batch Operations and Error Handling (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#Programming.Errors.BatchOperations) -// in the Amazon DynamoDB Developer Guide. With BatchWriteItem , you can -// efficiently write or delete large amounts of data, such as from Amazon EMR, or -// copy data from another database into DynamoDB. In order to improve performance -// with these large-scale operations, BatchWriteItem does not behave in the same -// way as individual PutItem and DeleteItem calls would. For example, you cannot -// specify conditions on individual put and delete requests, and BatchWriteItem -// does not return deleted items in the response. If you use a programming language -// that supports concurrency, you can use threads to write items in parallel. Your -// application must include the necessary logic to manage the threads. With -// languages that don't support threading, you must update or delete the specified -// items one at a time. In both situations, BatchWriteItem performs the specified -// put and delete operations in parallel, giving you the power of the thread pool -// approach without having to introduce complexity into your application. Parallel -// processing reduces latency, but each specified put and delete request consumes -// the same number of write capacity units whether it is processed in parallel or -// not. Delete operations on nonexistent items consume one write capacity unit. If -// one or more of the following is true, DynamoDB rejects the entire batch write -// operation: +// request with those unprocessed items until all items have been processed. 
+// +// For tables and indexes with provisioned capacity, if none of the items can be +// processed due to insufficient provisioned throughput on all of the tables in the +// request, then BatchWriteItem returns a ProvisionedThroughputExceededException . +// For all tables and indexes, if none of the items can be processed due to other +// throttling scenarios (such as exceeding partition level limits), then +// BatchWriteItem returns a ThrottlingException . +// +// If DynamoDB returns any unprocessed items, you should retry the batch operation +// on those items. However, we strongly recommend that you use an exponential +// backoff algorithm. If you retry the batch operation immediately, the underlying +// read or write requests can still fail due to throttling on the individual +// tables. If you delay the batch operation using exponential backoff, the +// individual requests in the batch are much more likely to succeed. +// +// For more information, see [Batch Operations and Error Handling] in the Amazon DynamoDB Developer Guide. +// +// With BatchWriteItem , you can efficiently write or delete large amounts of data, +// such as from Amazon EMR, or copy data from another database into DynamoDB. In +// order to improve performance with these large-scale operations, BatchWriteItem +// does not behave in the same way as individual PutItem and DeleteItem calls +// would. For example, you cannot specify conditions on individual put and delete +// requests, and BatchWriteItem does not return deleted items in the response. +// +// If you use a programming language that supports concurrency, you can use +// threads to write items in parallel. Your application must include the necessary +// logic to manage the threads. With languages that don't support threading, you +// must update or delete the specified items one at a time. In both situations, +// BatchWriteItem performs the specified put and delete operations in parallel, +// giving you the power of the thread pool approach without having to introduce +// complexity into your application. +// +// Parallel processing reduces latency, but each specified put and delete request +// consumes the same number of write capacity units whether it is processed in +// parallel or not. Delete operations on nonexistent items consume one write +// capacity unit. +// +// If one or more of the following is true, DynamoDB rejects the entire batch +// write operation: +// // - One or more tables specified in the BatchWriteItem request does not exist. +// // - Primary key attributes specified on an item in the request do not match // those in the corresponding table's primary key schema. +// // - You try to perform multiple operations on the same item in the same // BatchWriteItem request. For example, you cannot put and delete the same item // in the same BatchWriteItem request. +// // - Your request contains at least two items with identical hash and range keys // (which essentially is two put operations). +// // - There are more than 25 requests in the batch. +// // - Any individual item in a batch exceeds 400 KB. +// // - The total request size exceeds 16 MB. +// +// - Any individual items with keys exceeding the key length limits. For a +// partition key, the limit is 2048 bytes and for a sort key, the limit is 1024 +// bytes. 
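//
// A sketch of the loop described above for a hypothetical "Music" table,
// assuming a configured *dynamodb.Client named db, at most 25 write requests
// per call, and the aws-sdk-go-v2 dynamodb and dynamodb/types packages plus
// "context" and "time"; the backoff policy is illustrative, and real code
// should also cap the number of attempts:
//
//	func writeAll(ctx context.Context, db *dynamodb.Client, writes []types.WriteRequest) error {
//		pending := map[string][]types.WriteRequest{"Music": writes}
//		for attempt := 0; len(pending) > 0; attempt++ {
//			out, err := db.BatchWriteItem(ctx, &dynamodb.BatchWriteItemInput{RequestItems: pending})
//			if err != nil {
//				return err
//			}
//			// UnprocessedItems is empty once every request has been applied.
//			pending = out.UnprocessedItems
//			if len(pending) > 0 {
//				time.Sleep(time.Duration(1<<attempt) * 100 * time.Millisecond) // exponential backoff
//			}
//		}
//		return nil
//	}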
+// +// [Batch Operations and Error Handling]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#Programming.Errors.BatchOperations +// [Naming Rules and Data Types]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html func (c *Client) BatchWriteItem(ctx context.Context, params *BatchWriteItemInput, optFns ...func(*Options)) (*BatchWriteItemOutput, error) { if params == nil { params = &BatchWriteItemInput{} @@ -89,36 +117,47 @@ type BatchWriteItemInput struct { // A map of one or more table names or table ARNs and, for each table, a list of // operations to be performed ( DeleteRequest or PutRequest ). Each element in the // map consists of the following: + // // - DeleteRequest - Perform a DeleteItem operation on the specified item. The // item to be deleted is identified by a Key subelement: + // // - Key - A map of primary key attribute values that uniquely identify the item. // Each entry in this map consists of an attribute name and an attribute value. For // each primary key, you must provide all of the key attributes. For example, with // a simple primary key, you only need to provide a value for the partition key. // For a composite primary key, you must provide values for both the partition key // and the sort key. + // // - PutRequest - Perform a PutItem operation on the specified item. The item to // be put is identified by an Item subelement: + // // - Item - A map of attributes and their values. Each entry in this map consists // of an attribute name and an attribute value. Attribute values must not be null; // string and binary type attributes must have lengths greater than zero; and set // type attributes must not be empty. Requests that contain empty values are - // rejected with a ValidationException exception. If you specify any attributes - // that are part of an index key, then the data types for those attributes must - // match those of the schema in the table's attribute definition. + // rejected with a ValidationException exception. + // + // If you specify any attributes that are part of an index key, then the data + // types for those attributes must match those of the schema in the table's + // attribute definition. // // This member is required. RequestItems map[string][]types.WriteRequest // Determines the level of detail about either provisioned or on-demand throughput // consumption that is returned in the response: + // // - INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary index - // that was accessed. Note that some operations, such as GetItem and BatchGetItem - // , do not access any indexes at all. In these cases, specifying INDEXES will - // only return ConsumedCapacity information for table(s). + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem , do not access any + // indexes at all. In these cases, specifying INDEXES will only return + // ConsumedCapacity information for table(s). + // // - TOTAL - The response includes only the aggregate ConsumedCapacity for the // operation. + // // - NONE - No ConsumedCapacity details are included in the response. ReturnConsumedCapacity types.ReturnConsumedCapacity @@ -134,46 +173,62 @@ type BatchWriteItemInput struct { // Represents the output of a BatchWriteItem operation. type BatchWriteItemOutput struct { - // The capacity units consumed by the entire BatchWriteItem operation. 
Each - // element consists of: + // The capacity units consumed by the entire BatchWriteItem operation. + // + // Each element consists of: + // // - TableName - The table that consumed the provisioned throughput. + // // - CapacityUnits - The total number of capacity units consumed. ConsumedCapacity []types.ConsumedCapacity // A list of tables that were processed by BatchWriteItem and, for each table, // information about any item collections that were affected by individual - // DeleteItem or PutItem operations. Each entry consists of the following - // subelements: + // DeleteItem or PutItem operations. + // + // Each entry consists of the following subelements: + // // - ItemCollectionKey - The partition key value of the item collection. This is // the same as the partition key value of the item. + // // - SizeEstimateRangeGB - An estimate of item collection size, expressed in GB. // This is a two-element array containing a lower bound and an upper bound for the // estimate. The estimate includes the size of all the items in the table, plus the // size of all attributes projected into all of the local secondary indexes on the // table. Use this estimate to measure whether a local secondary index is - // approaching its size limit. The estimate is subject to change over time; - // therefore, do not rely on the precision or accuracy of the estimate. + // approaching its size limit. + // + // The estimate is subject to change over time; therefore, do not rely on the + // precision or accuracy of the estimate. ItemCollectionMetrics map[string][]types.ItemCollectionMetrics // A map of tables and requests against those tables that were not processed. The // UnprocessedItems value is in the same form as RequestItems , so you can provide // this value directly to a subsequent BatchWriteItem operation. For more - // information, see RequestItems in the Request Parameters section. Each - // UnprocessedItems entry consists of a table name or table ARN and, for that + // information, see RequestItems in the Request Parameters section. + // + // Each UnprocessedItems entry consists of a table name or table ARN and, for that // table, a list of operations to perform ( DeleteRequest or PutRequest ). + // // - DeleteRequest - Perform a DeleteItem operation on the specified item. The // item to be deleted is identified by a Key subelement: + // // - Key - A map of primary key attribute values that uniquely identify the item. // Each entry in this map consists of an attribute name and an attribute value. + // // - PutRequest - Perform a PutItem operation on the specified item. The item to // be put is identified by an Item subelement: + // // - Item - A map of attributes and their values. Each entry in this map consists // of an attribute name and an attribute value. Attribute values must not be null; // string and binary type attributes must have lengths greater than zero; and set // type attributes must not be empty. Requests that contain empty values will be - // rejected with a ValidationException exception. If you specify any attributes - // that are part of an index key, then the data types for those attributes must - // match those of the schema in the table's attribute definition. + // rejected with a ValidationException exception. + // + // If you specify any attributes that are part of an index key, then the data + // types for those attributes must match those of the schema in the table's + // attribute definition. 
+ // // If there are no unprocessed items remaining, the response contains an empty // UnprocessedItems map. UnprocessedItems map[string][]types.WriteRequest @@ -242,6 +297,12 @@ func (c *Client) addOperationBatchWriteItemMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpBatchWriteItemValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateBackup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateBackup.go index 19161558e0d..6f0e92ddd3e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateBackup.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateBackup.go @@ -12,23 +12,35 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates a backup for an existing table. Each time you create an on-demand -// backup, the entire table data is backed up. There is no limit to the number of -// on-demand backups that can be taken. When you create an on-demand backup, a time -// marker of the request is cataloged, and the backup is created asynchronously, by -// applying all changes until the time of the request to the last full table -// snapshot. Backup requests are processed instantaneously and become available for -// restore within minutes. You can call CreateBackup at a maximum rate of 50 times -// per second. All backups in DynamoDB work without consuming any provisioned -// throughput on the table. If you submit a backup request on 2018-12-14 at -// 14:25:00, the backup is guaranteed to contain all data committed to the table up -// to 14:24:00, and data committed after 14:26:00 will not be. The backup might -// contain data modifications made between 14:24:00 and 14:26:00. On-demand backup -// does not support causal consistency. Along with data, the following are also -// included on the backups: +// Creates a backup for an existing table. +// +// Each time you create an on-demand backup, the entire table data is backed up. +// There is no limit to the number of on-demand backups that can be taken. +// +// When you create an on-demand backup, a time marker of the request is cataloged, +// and the backup is created asynchronously, by applying all changes until the time +// of the request to the last full table snapshot. Backup requests are processed +// instantaneously and become available for restore within minutes. +// +// You can call CreateBackup at a maximum rate of 50 times per second. +// +// All backups in DynamoDB work without consuming any provisioned throughput on +// the table. +// +// If you submit a backup request on 2018-12-14 at 14:25:00, the backup is +// guaranteed to contain all data committed to the table up to 14:24:00, and data +// committed after 14:26:00 will not be. The backup might contain data +// modifications made between 14:24:00 and 14:26:00. On-demand backup does not +// support causal consistency. 
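The BatchWriteItem documentation above notes that UnprocessedItems comes back in the same form as RequestItems. The following is a minimal Go sketch of the resubmission loop the docs imply; it is illustrative only (helper name, table name, and the absence of backoff are assumptions) and is not part of this patch or the vendored SDK.

package example // illustrative sketch only; not part of the vendored SDK

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

// batchWriteAll submits the given write requests and resubmits any
// UnprocessedItems until the map comes back empty. A production caller
// would typically add exponential backoff between iterations.
func batchWriteAll(ctx context.Context, client *dynamodb.Client, table string, reqs []types.WriteRequest) error {
	pending := map[string][]types.WriteRequest{table: reqs}
	for len(pending) > 0 {
		out, err := client.BatchWriteItem(ctx, &dynamodb.BatchWriteItemInput{
			RequestItems:           pending,
			ReturnConsumedCapacity: types.ReturnConsumedCapacityTotal,
		})
		if err != nil {
			return err
		}
		// UnprocessedItems is already in RequestItems form, so it can be
		// passed directly into the next BatchWriteItem call.
		pending = out.UnprocessedItems
	}
	return nil
}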
+// +// Along with data, the following are also included on the backups: +// // - Global secondary indexes (GSIs) +// // - Local secondary indexes (LSIs) +// // - Streams +// // - Provisioned read and write capacity func (c *Client) CreateBackup(ctx context.Context, params *CreateBackupInput, optFns ...func(*Options)) (*CreateBackupOutput, error) { if params == nil { @@ -130,6 +142,12 @@ func (c *Client) addOperationCreateBackupMiddlewares(stack *middleware.Stack, op if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpCreateBackupValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateGlobalTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateGlobalTable.go index 693a39d1eae..6bf152bdb9e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateGlobalTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateGlobalTable.go @@ -14,41 +14,57 @@ import ( // Creates a global table from an existing table. A global table creates a // replication relationship between two or more DynamoDB tables with the same table -// name in the provided Regions. This operation only applies to Version 2017.11.29 -// (Legacy) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) -// of global tables. We recommend using Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// when creating new global tables, as it provides greater flexibility, higher -// efficiency and consumes less write capacity than 2017.11.29 (Legacy). To -// determine which version you are using, see Determining the version (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html) -// . To update existing global tables from version 2017.11.29 (Legacy) to version -// 2019.11.21 (Current), see Updating global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html) -// . If you want to add a new replica table to a global table, each of the -// following conditions must be true: +// name in the provided Regions. +// +// This documentation is for version 2017.11.29 (Legacy) of global tables, which +// should be avoided for new global tables. Customers should use [Global Tables version 2019.11.21 (Current)]when possible, +// because it provides greater flexibility, higher efficiency, and consumes less +// write capacity than 2017.11.29 (Legacy). +// +// To determine which version you're using, see [Determining the global table version you are using]. To update existing global tables +// from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see [Upgrading global tables]. +// +// If you want to add a new replica table to a global table, each of the following +// conditions must be true: +// // - The table must have the same primary key as all of the other replicas. +// // - The table must have the same name as all of the other replicas. +// // - The table must have DynamoDB Streams enabled, with the stream containing // both the new and the old images of the item. +// // - None of the replica tables in the global table can contain any data. 
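As a companion to the CreateBackup documentation above, here is a hedged usage sketch; the helper name and the idea of handing the ARN to DescribeBackup or RestoreTableFromBackup later are assumptions for illustration, not part of this patch.

package example // illustrative sketch only; not part of the vendored SDK

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
)

// requestBackup starts an on-demand backup; the backup itself is created
// asynchronously, as described in the documentation above.
func requestBackup(ctx context.Context, client *dynamodb.Client, table, backupName string) (string, error) {
	out, err := client.CreateBackup(ctx, &dynamodb.CreateBackupInput{
		TableName:  aws.String(table),
		BackupName: aws.String(backupName),
	})
	if err != nil {
		return "", err
	}
	// The returned ARN can later be passed to DescribeBackup or RestoreTableFromBackup.
	return aws.ToString(out.BackupDetails.BackupArn), nil
}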
// // If global secondary indexes are specified, then the following conditions must // also be met: +// // - The global secondary indexes must have the same name. +// // - The global secondary indexes must have the same hash key and sort key (if // present). // // If local secondary indexes are specified, then the following conditions must // also be met: +// // - The local secondary indexes must have the same name. +// // - The local secondary indexes must have the same hash key and sort key (if // present). // // Write capacity settings should be set consistently across your replica tables // and secondary indexes. DynamoDB strongly recommends enabling auto scaling to // manage the write capacity settings for all of your global tables replicas and -// indexes. If you prefer to manage write capacity settings manually, you should -// provision equal replicated write capacity units to your replica tables. You -// should also provision equal replicated write capacity units to matching -// secondary indexes across your global table. +// indexes. +// +// If you prefer to manage write capacity settings manually, you should provision +// equal replicated write capacity units to your replica tables. You should also +// provision equal replicated write capacity units to matching secondary indexes +// across your global table. +// +// [Global Tables version 2019.11.21 (Current)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html +// [Upgrading global tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html +// [Determining the global table version you are using]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html func (c *Client) CreateGlobalTable(ctx context.Context, params *CreateGlobalTableInput, optFns ...func(*Options)) (*CreateGlobalTableOutput, error) { if params == nil { params = &CreateGlobalTableInput{} @@ -148,6 +164,12 @@ func (c *Client) addOperationCreateGlobalTableMiddlewares(stack *middleware.Stac if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpCreateGlobalTableValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateTable.go index 92975ec6d3a..aee3578760e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateTable.go @@ -15,15 +15,19 @@ import ( // The CreateTable operation adds a new table to your account. In an Amazon Web // Services account, table names must be unique within each Region. That is, you // can have two tables with same name if you create the tables in different -// Regions. CreateTable is an asynchronous operation. Upon receiving a CreateTable -// request, DynamoDB immediately returns a response with a TableStatus of CREATING -// . After the table is created, DynamoDB sets the TableStatus to ACTIVE . You can -// perform read and write operations only on an ACTIVE table. You can optionally -// define secondary indexes on the new table, as part of the CreateTable -// operation. 
If you want to create multiple tables with secondary indexes on them, -// you must create the tables sequentially. Only one table with secondary indexes -// can be in the CREATING state at any given time. You can use the DescribeTable -// action to check the table status. +// Regions. +// +// CreateTable is an asynchronous operation. Upon receiving a CreateTable request, +// DynamoDB immediately returns a response with a TableStatus of CREATING . After +// the table is created, DynamoDB sets the TableStatus to ACTIVE . You can perform +// read and write operations only on an ACTIVE table. +// +// You can optionally define secondary indexes on the new table, as part of the +// CreateTable operation. If you want to create multiple tables with secondary +// indexes on them, you must create the tables sequentially. Only one table with +// secondary indexes can be in the CREATING state at any given time. +// +// You can use the DescribeTable action to check the table status. func (c *Client) CreateTable(ctx context.Context, params *CreateTableInput, optFns ...func(*Options)) (*CreateTableOutput, error) { if params == nil { params = &CreateTableInput{} @@ -49,25 +53,38 @@ type CreateTableInput struct { // Specifies the attributes that make up the primary key for a table or an index. // The attributes in KeySchema must also be defined in the AttributeDefinitions - // array. For more information, see Data Model (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html) - // in the Amazon DynamoDB Developer Guide. Each KeySchemaElement in the array is - // composed of: + // array. For more information, see [Data Model]in the Amazon DynamoDB Developer Guide. + // + // Each KeySchemaElement in the array is composed of: + // // - AttributeName - The name of this key attribute. + // // - KeyType - The role that the key attribute will assume: + // // - HASH - partition key + // // - RANGE - sort key + // // The partition key of an item is also known as its hash attribute. The term // "hash attribute" derives from the DynamoDB usage of an internal hash function to // evenly distribute data items across partitions, based on their partition key - // values. The sort key of an item is also known as its range attribute. The term - // "range attribute" derives from the way DynamoDB stores items with the same - // partition key physically close together, in sorted order by the sort key value. + // values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. + // // For a simple primary key (partition key), you must provide exactly one element - // with a KeyType of HASH . For a composite primary key (partition key and sort - // key), you must provide exactly two elements, in this order: The first element - // must have a KeyType of HASH , and the second element must have a KeyType of - // RANGE . For more information, see Working with Tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#WorkingWithTables.primary.key) - // in the Amazon DynamoDB Developer Guide. + // with a KeyType of HASH . + // + // For a composite primary key (partition key and sort key), you must provide + // exactly two elements, in this order: The first element must have a KeyType of + // HASH , and the second element must have a KeyType of RANGE . 
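The CreateGlobalTable documentation above describes the legacy 2017.11.29 replication setup. A minimal sketch of one such call follows; the Region names are placeholders, the tables are assumed to already exist empty in both Regions, and new deployments should normally use version 2019.11.21 (Current) instead of this API.

package example // illustrative sketch only; not part of the vendored SDK

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

// linkLegacyGlobalTable creates a 2017.11.29 (Legacy) global table from
// identically named, empty tables that already exist in both Regions.
func linkLegacyGlobalTable(ctx context.Context, client *dynamodb.Client, name string) error {
	_, err := client.CreateGlobalTable(ctx, &dynamodb.CreateGlobalTableInput{
		GlobalTableName: aws.String(name),
		ReplicationGroup: []types.Replica{
			{RegionName: aws.String("us-east-1")},
			{RegionName: aws.String("eu-west-1")},
		},
	})
	return err
}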
+ // + // For more information, see [Working with Tables] in the Amazon DynamoDB Developer Guide. + // + // [Data Model]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html + // [Working with Tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#WorkingWithTables.primary.key // // This member is required. KeySchema []types.KeySchemaElement @@ -80,12 +97,15 @@ type CreateTableInput struct { // Controls how you are charged for read and write throughput and how you manage // capacity. This setting can be changed later. + // // - PROVISIONED - We recommend using PROVISIONED for predictable workloads. - // PROVISIONED sets the billing mode to Provisioned Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual) - // . + // PROVISIONED sets the billing mode to [Provisioned capacity mode]. + // // - PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable - // workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand) - // . + // workloads. PAY_PER_REQUEST sets the billing mode to [On-demand capacity mode]. + // + // [Provisioned capacity mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html + // [On-demand capacity mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/on-demand-capacity-mode.html BillingMode types.BillingMode // Indicates whether deletion protection is to be enabled (true) or disabled @@ -94,23 +114,32 @@ type CreateTableInput struct { // One or more global secondary indexes (the maximum is 20) to be created on the // table. Each global secondary index in the array includes the following: + // // - IndexName - The name of the global secondary index. Must be unique only for // this table. + // // - KeySchema - Specifies the key schema for the global secondary index. + // // - Projection - Specifies attributes that are copied (projected) from the table // into the index. These are in addition to the primary key attributes and index // key attributes, which are automatically projected. Each attribute specification // is composed of: + // // - ProjectionType - One of the following: + // // - KEYS_ONLY - Only the index and primary keys are projected into the index. + // // - INCLUDE - Only the specified table attributes are projected into the index. // The list of projected attributes is in NonKeyAttributes . + // // - ALL - All of the table attributes are projected into the index. + // // - NonKeyAttributes - A list of one or more non-key attribute names that are // projected into the secondary index. The total count of attributes provided in // NonKeyAttributes , summed across all of the secondary indexes, must not exceed // 100. If you project the same attribute into two different indexes, this counts // as two distinct attributes when determining the total. + // // - ProvisionedThroughput - The provisioned throughput settings for the global // secondary index, consisting of read and write capacity units. GlobalSecondaryIndexes []types.GlobalSecondaryIndex @@ -118,21 +147,30 @@ type CreateTableInput struct { // One or more local secondary indexes (the maximum is 5) to be created on the // table. Each index is scoped to a given partition key value. 
There is a 10 GB // size limit per partition key value; otherwise, the size of a local secondary - // index is unconstrained. Each local secondary index in the array includes the - // following: + // index is unconstrained. + // + // Each local secondary index in the array includes the following: + // // - IndexName - The name of the local secondary index. Must be unique only for // this table. + // // - KeySchema - Specifies the key schema for the local secondary index. The key // schema must begin with the same partition key as the table. + // // - Projection - Specifies attributes that are copied (projected) from the table // into the index. These are in addition to the primary key attributes and index // key attributes, which are automatically projected. Each attribute specification // is composed of: + // // - ProjectionType - One of the following: + // // - KEYS_ONLY - Only the index and primary keys are projected into the index. + // // - INCLUDE - Only the specified table attributes are projected into the index. // The list of projected attributes is in NonKeyAttributes . + // // - ALL - All of the table attributes are projected into the index. + // // - NonKeyAttributes - A list of one or more non-key attribute names that are // projected into the secondary index. The total count of attributes provided in // NonKeyAttributes , summed across all of the secondary indexes, must not exceed @@ -140,41 +178,61 @@ type CreateTableInput struct { // as two distinct attributes when determining the total. LocalSecondaryIndexes []types.LocalSecondaryIndex + // Sets the maximum number of read and write units for the specified table in + // on-demand capacity mode. If you use this parameter, you must specify + // MaxReadRequestUnits , MaxWriteRequestUnits , or both. + OnDemandThroughput *types.OnDemandThroughput + // Represents the provisioned throughput settings for a specified table or index. - // The settings can be modified using the UpdateTable operation. If you set - // BillingMode as PROVISIONED , you must specify this property. If you set - // BillingMode as PAY_PER_REQUEST , you cannot specify this property. For current - // minimum and maximum provisioned throughput values, see Service, Account, and - // Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) - // in the Amazon DynamoDB Developer Guide. + // The settings can be modified using the UpdateTable operation. + // + // If you set BillingMode as PROVISIONED , you must specify this property. If you + // set BillingMode as PAY_PER_REQUEST , you cannot specify this property. + // + // For current minimum and maximum provisioned throughput values, see [Service, Account, and Table Quotas] in the + // Amazon DynamoDB Developer Guide. + // + // [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html ProvisionedThroughput *types.ProvisionedThroughput // An Amazon Web Services resource-based policy document in JSON format that will - // be attached to the table. When you attach a resource-based policy while creating - // a table, the policy creation is strongly consistent. The maximum size supported - // for a resource-based policy document is 20 KB. DynamoDB counts whitespaces when - // calculating the size of a policy against this limit. You can’t request an - // increase for this limit. 
For a full list of all considerations that you should - // keep in mind while attaching a resource-based policy, see Resource-based policy - // considerations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-considerations.html) - // . + // be attached to the table. + // + // When you attach a resource-based policy while creating a table, the policy + // application is strongly consistent. + // + // The maximum size supported for a resource-based policy document is 20 KB. + // DynamoDB counts whitespaces when calculating the size of a policy against this + // limit. For a full list of all considerations that apply for resource-based + // policies, see [Resource-based policy considerations]. + // + // You need to specify the CreateTable and PutResourcePolicy IAM actions for + // authorizing a user to create a table with a resource-based policy. + // + // [Resource-based policy considerations]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-considerations.html ResourcePolicy *string // Represents the settings used to enable server-side encryption. SSESpecification *types.SSESpecification // The settings for DynamoDB Streams on the table. These settings consist of: + // // - StreamEnabled - Indicates whether DynamoDB Streams is to be enabled (true) // or disabled (false). + // // - StreamViewType - When an item in the table is modified, StreamViewType // determines what information is written to the table's stream. Valid values for // StreamViewType are: + // // - KEYS_ONLY - Only the key attributes of the modified item are written to the // stream. + // // - NEW_IMAGE - The entire item, as it appears after it was modified, is written // to the stream. + // // - OLD_IMAGE - The entire item, as it appeared before it was modified, is // written to the stream. + // // - NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are // written to the stream. StreamSpecification *types.StreamSpecification @@ -183,9 +241,9 @@ type CreateTableInput struct { // STANDARD_INFREQUENT_ACCESS . TableClass types.TableClass - // A list of key-value pairs to label the table. For more information, see Tagging - // for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html) - // . + // A list of key-value pairs to label the table. For more information, see [Tagging for DynamoDB]. + // + // [Tagging for DynamoDB]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html Tags []types.Tag noSmithyDocumentSerde @@ -261,6 +319,12 @@ func (c *Client) addOperationCreateTableMiddlewares(stack *middleware.Stack, opt if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpCreateTableValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteBackup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteBackup.go index 0d847ed08ad..8f381639e98 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteBackup.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteBackup.go @@ -12,8 +12,9 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes an existing backup of a table. You can call DeleteBackup at a maximum -// rate of 10 times per second. 
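To ground the CreateTable documentation above (KeySchema, AttributeDefinitions, BillingMode), here is a hedged sketch of creating a table with a composite primary key; the table and attribute names are placeholders and the helper is not part of this patch.

package example // illustrative sketch only; not part of the vendored SDK

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

// createOrdersTable creates a table with a composite primary key: exactly one
// HASH element followed by one RANGE element, both of which must also appear
// in AttributeDefinitions, as the documentation above requires.
func createOrdersTable(ctx context.Context, client *dynamodb.Client) error {
	_, err := client.CreateTable(ctx, &dynamodb.CreateTableInput{
		TableName: aws.String("Orders"),
		AttributeDefinitions: []types.AttributeDefinition{
			{AttributeName: aws.String("CustomerId"), AttributeType: types.ScalarAttributeTypeS},
			{AttributeName: aws.String("OrderId"), AttributeType: types.ScalarAttributeTypeS},
		},
		KeySchema: []types.KeySchemaElement{
			{AttributeName: aws.String("CustomerId"), KeyType: types.KeyTypeHash},
			{AttributeName: aws.String("OrderId"), KeyType: types.KeyTypeRange},
		},
		// PAY_PER_REQUEST avoids having to size ProvisionedThroughput up front.
		BillingMode: types.BillingModePayPerRequest,
	})
	return err
}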
+// Deletes an existing backup of a table. +// +// You can call DeleteBackup at a maximum rate of 10 times per second. func (c *Client) DeleteBackup(ctx context.Context, params *DeleteBackupInput, optFns ...func(*Options)) (*DeleteBackupOutput, error) { if params == nil { params = &DeleteBackupInput{} @@ -108,6 +109,12 @@ func (c *Client) addOperationDeleteBackupMiddlewares(stack *middleware.Stack, op if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDeleteBackupValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteItem.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteItem.go index eab33851345..8e56c3f2bbe 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteItem.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteItem.go @@ -14,13 +14,18 @@ import ( // Deletes a single item in a table by primary key. You can perform a conditional // delete operation that deletes the item if it exists, or if it has an expected -// attribute value. In addition to deleting an item, you can also return the item's -// attribute values in the same operation, using the ReturnValues parameter. +// attribute value. +// +// In addition to deleting an item, you can also return the item's attribute +// values in the same operation, using the ReturnValues parameter. +// // Unless you specify conditions, the DeleteItem is an idempotent operation; // running it multiple times on the same item or attribute does not result in an -// error response. Conditional deletes are useful for deleting items only if -// specific conditions are met. If those conditions are met, DynamoDB performs the -// delete. Otherwise, the item is not deleted. +// error response. +// +// Conditional deletes are useful for deleting items only if specific conditions +// are met. If those conditions are met, DynamoDB performs the delete. Otherwise, +// the item is not deleted. func (c *Client) DeleteItem(ctx context.Context, params *DeleteItemInput, optFns ...func(*Options)) (*DeleteItemOutput, error) { if params == nil { params = &DeleteItemInput{} @@ -40,10 +45,12 @@ func (c *Client) DeleteItem(ctx context.Context, params *DeleteItemInput, optFns type DeleteItemInput struct { // A map of attribute names to AttributeValue objects, representing the primary - // key of the item to delete. For the primary key, you must provide all of the key - // attributes. For example, with a simple primary key, you only need to provide a - // value for the partition key. For a composite primary key, you must provide - // values for both the partition key and the sort key. + // key of the item to delete. + // + // For the primary key, you must provide all of the key attributes. For example, + // with a simple primary key, you only need to provide a value for the partition + // key. For a composite primary key, you must provide values for both the partition + // key and the sort key. // // This member is required. Key map[string]types.AttributeValue @@ -55,70 +62,111 @@ type DeleteItemInput struct { TableName *string // A condition that must be satisfied in order for a conditional DeleteItem to - // succeed. An expression can contain any of the following: + // succeed. 
+ // + // An expression can contain any of the following: + // // - Functions: attribute_exists | attribute_not_exists | attribute_type | - // contains | begins_with | size These function names are case-sensitive. + // contains | begins_with | size + // + // These function names are case-sensitive. + // // - Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN + // // - Logical operators: AND | OR | NOT - // For more information about condition expressions, see Condition Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) - // in the Amazon DynamoDB Developer Guide. + // + // For more information about condition expressions, see [Condition Expressions] in the Amazon DynamoDB + // Developer Guide. + // + // [Condition Expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html ConditionExpression *string // This is a legacy parameter. Use ConditionExpression instead. For more - // information, see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html) - // in the Amazon DynamoDB Developer Guide. + // information, see [ConditionalOperator]in the Amazon DynamoDB Developer Guide. + // + // [ConditionalOperator]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html ConditionalOperator types.ConditionalOperator // This is a legacy parameter. Use ConditionExpression instead. For more - // information, see Expected (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html) - // in the Amazon DynamoDB Developer Guide. + // information, see [Expected]in the Amazon DynamoDB Developer Guide. + // + // [Expected]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html Expected map[string]types.ExpectedAttributeValue // One or more substitution tokens for attribute names in an expression. The // following are some use cases for using ExpressionAttributeNames : + // // - To access an attribute whose name conflicts with a DynamoDB reserved word. + // // - To create a placeholder for repeating occurrences of an attribute name in // an expression. + // // - To prevent special characters in an attribute name from being // misinterpreted in an expression. + // // Use the # character in an expression to dereference an attribute name. For // example, consider the following attribute name: + // // - Percentile + // // The name of this attribute conflicts with a reserved word, so it cannot be used - // directly in an expression. (For the complete list of reserved words, see - // Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) - // in the Amazon DynamoDB Developer Guide). To work around this, you could specify - // the following for ExpressionAttributeNames : + // directly in an expression. (For the complete list of reserved words, see [Reserved Words]in the + // Amazon DynamoDB Developer Guide). To work around this, you could specify the + // following for ExpressionAttributeNames : + // // - {"#P":"Percentile"} + // // You could then use this substitution in an expression, as in this example: + // // - #P = :val + // // Tokens that begin with the : character are expression attribute values, which - // are placeholders for the actual value at runtime. 
For more information on - // expression attribute names, see Specifying Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. + // are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see [Specifying Item Attributes] in the Amazon DynamoDB + // Developer Guide. + // + // [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html + // [Specifying Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html ExpressionAttributeNames map[string]string - // One or more values that can be substituted in an expression. Use the : (colon) - // character in an expression to dereference an attribute value. For example, - // suppose that you wanted to check whether the value of the ProductStatus - // attribute was one of the following: Available | Backordered | Discontinued You - // would first need to specify ExpressionAttributeValues as follows: { - // ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, - // ":disc":{"S":"Discontinued"} } You could then use these values in an expression, - // such as this: ProductStatus IN (:avail, :back, :disc) For more information on - // expression attribute values, see Condition Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) - // in the Amazon DynamoDB Developer Guide. + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute value. + // For example, suppose that you wanted to check whether the value of the + // ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, + // ":disc":{"S":"Discontinued"} } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see [Condition Expressions] in the Amazon + // DynamoDB Developer Guide. + // + // [Condition Expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html ExpressionAttributeValues map[string]types.AttributeValue // Determines the level of detail about either provisioned or on-demand throughput // consumption that is returned in the response: + // // - INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary index - // that was accessed. Note that some operations, such as GetItem and BatchGetItem - // , do not access any indexes at all. In these cases, specifying INDEXES will - // only return ConsumedCapacity information for table(s). + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem , do not access any + // indexes at all. In these cases, specifying INDEXES will only return + // ConsumedCapacity information for table(s). + // // - TOTAL - The response includes only the aggregate ConsumedCapacity for the // operation. + // // - NONE - No ConsumedCapacity details are included in the response. 
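The DeleteItem documentation above walks through ConditionExpression and ExpressionAttributeValues using a ProductStatus example. A hedged Go sketch of that same conditional delete follows; the table, key attribute name, and helper name are assumptions for illustration.

package example // illustrative sketch only; not part of the vendored SDK

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

// deleteDiscontinuedProduct performs a conditional delete modeled on the
// ProductStatus example in the documentation above: the item is removed
// only if its status is one of the listed values.
func deleteDiscontinuedProduct(ctx context.Context, client *dynamodb.Client, table, id string) (*dynamodb.DeleteItemOutput, error) {
	return client.DeleteItem(ctx, &dynamodb.DeleteItemInput{
		TableName: aws.String(table),
		Key: map[string]types.AttributeValue{
			"Id": &types.AttributeValueMemberS{Value: id},
		},
		ConditionExpression: aws.String("ProductStatus IN (:avail, :back, :disc)"),
		ExpressionAttributeValues: map[string]types.AttributeValue{
			":avail": &types.AttributeValueMemberS{Value: "Available"},
			":back":  &types.AttributeValueMemberS{Value: "Backordered"},
			":disc":  &types.AttributeValueMemberS{Value: "Discontinued"},
		},
		// ALL_OLD returns the deleted item's attributes without consuming read capacity.
		ReturnValues:           types.ReturnValueAllOld,
		ReturnConsumedCapacity: types.ReturnConsumedCapacityTotal,
	})
}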
ReturnConsumedCapacity types.ReturnConsumedCapacity @@ -130,20 +178,26 @@ type DeleteItemInput struct { // Use ReturnValues if you want to get the item attributes as they appeared before // they were deleted. For DeleteItem , the valid values are: + // // - NONE - If ReturnValues is not specified, or if its value is NONE , then // nothing is returned. (This setting is the default for ReturnValues .) + // // - ALL_OLD - The content of the old item is returned. + // // There is no additional cost associated with requesting a return value aside // from the small network and processing overhead of receiving a larger response. - // No read capacity units are consumed. The ReturnValues parameter is used by - // several DynamoDB operations; however, DeleteItem does not recognize any values - // other than NONE or ALL_OLD . + // No read capacity units are consumed. + // + // The ReturnValues parameter is used by several DynamoDB operations; however, + // DeleteItem does not recognize any values other than NONE or ALL_OLD . ReturnValues types.ReturnValue // An optional parameter that returns the item attributes for a DeleteItem - // operation that failed a condition check. There is no additional cost associated - // with requesting a return value aside from the small network and processing - // overhead of receiving a larger response. No read capacity units are consumed. + // operation that failed a condition check. + // + // There is no additional cost associated with requesting a return value aside + // from the small network and processing overhead of receiving a larger response. + // No read capacity units are consumed. ReturnValuesOnConditionCheckFailure types.ReturnValuesOnConditionCheckFailure noSmithyDocumentSerde @@ -161,24 +215,30 @@ type DeleteItemOutput struct { // includes the total provisioned throughput consumed, along with statistics for // the table and any indexes involved in the operation. ConsumedCapacity is only // returned if the ReturnConsumedCapacity parameter was specified. For more - // information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) - // in the Amazon DynamoDB Developer Guide. + // information, see [Provisioned capacity mode]in the Amazon DynamoDB Developer Guide. + // + // [Provisioned capacity mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html ConsumedCapacity *types.ConsumedCapacity // Information about item collections, if any, that were affected by the DeleteItem // operation. ItemCollectionMetrics is only returned if the // ReturnItemCollectionMetrics parameter was specified. If the table does not have // any local secondary indexes, this information is not returned in the response. + // // Each ItemCollectionMetrics element consists of: + // // - ItemCollectionKey - The partition key value of the item collection. This is // the same as the partition key value of the item itself. + // // - SizeEstimateRangeGB - An estimate of item collection size, in gigabytes. // This value is a two-element array containing a lower bound and an upper bound // for the estimate. The estimate includes the size of all the items in the table, // plus the size of all attributes projected into all of the local secondary // indexes on that table. Use this estimate to measure whether a local secondary - // index is approaching its size limit. 
The estimate is subject to change over - // time; therefore, do not rely on the precision or accuracy of the estimate. + // index is approaching its size limit. + // + // The estimate is subject to change over time; therefore, do not rely on the + // precision or accuracy of the estimate. ItemCollectionMetrics *types.ItemCollectionMetrics // Metadata pertaining to the operation's result. @@ -245,6 +305,12 @@ func (c *Client) addOperationDeleteItemMiddlewares(stack *middleware.Stack, opti if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDeleteItemValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteResourcePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteResourcePolicy.go index cd6720b0861..eabdff7ce2d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteResourcePolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteResourcePolicy.go @@ -12,13 +12,17 @@ import ( ) // Deletes the resource-based policy attached to the resource, which can be a -// table or stream. DeleteResourcePolicy is an idempotent operation; running it -// multiple times on the same resource doesn't result in an error response, unless -// you specify an ExpectedRevisionId , which will then return a -// PolicyNotFoundException . To make sure that you don't inadvertently lock -// yourself out of your own resources, the root principal in your Amazon Web -// Services account can perform DeleteResourcePolicy requests, even if your -// resource-based policy explicitly denies the root principal's access. +// table or stream. +// +// DeleteResourcePolicy is an idempotent operation; running it multiple times on +// the same resource doesn't result in an error response, unless you specify an +// ExpectedRevisionId , which will then return a PolicyNotFoundException . +// +// To make sure that you don't inadvertently lock yourself out of your own +// resources, the root principal in your Amazon Web Services account can perform +// DeleteResourcePolicy requests, even if your resource-based policy explicitly +// denies the root principal's access. +// // DeleteResourcePolicy is an asynchronous operation. If you issue a // GetResourcePolicy request immediately after running the DeleteResourcePolicy // request, DynamoDB might still return the deleted policy. This is because the @@ -61,9 +65,11 @@ type DeleteResourcePolicyInput struct { type DeleteResourcePolicyOutput struct { - // A unique string that represents the revision ID of the policy. If you are - // comparing revision IDs, make sure to always use string comparison logic. This - // value will be empty if you make a request against a resource without a policy. + // A unique string that represents the revision ID of the policy. If you're + // comparing revision IDs, make sure to always use string comparison logic. + // + // This value will be empty if you make a request against a resource without a + // policy. RevisionId *string // Metadata pertaining to the operation's result. 
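Following the DeleteResourcePolicy documentation above, here is a minimal sketch of a guarded delete using ExpectedRevisionId; the helper name and the assumption that the caller already holds the current revision ID are illustrative only.

package example // illustrative sketch only; not part of the vendored SDK

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
)

// deletePolicyIfUnchanged removes a table's resource-based policy, supplying
// the revision ID the caller last observed so the delete only applies against
// that revision, as described in the documentation above.
func deletePolicyIfUnchanged(ctx context.Context, client *dynamodb.Client, tableArn, expectedRevisionID string) error {
	_, err := client.DeleteResourcePolicy(ctx, &dynamodb.DeleteResourcePolicyInput{
		ResourceArn:        aws.String(tableArn),
		ExpectedRevisionId: aws.String(expectedRevisionID),
	})
	return err
}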
@@ -130,6 +136,12 @@ func (c *Client) addOperationDeleteResourcePolicyMiddlewares(stack *middleware.S if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDeleteResourcePolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteTable.go index 00d36f83d3c..e668566936e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteTable.go @@ -18,15 +18,24 @@ import ( // delete it. If a table is in CREATING or UPDATING states, then DynamoDB returns // a ResourceInUseException . If the specified table does not exist, DynamoDB // returns a ResourceNotFoundException . If table is already in the DELETING -// state, no error is returned. This operation only applies to Version 2019.11.21 -// (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// of global tables. DynamoDB might continue to accept data read and write -// operations, such as GetItem and PutItem , on a table in the DELETING state -// until the table deletion is complete. When you delete a table, any indexes on -// that table are also deleted. If you have DynamoDB Streams enabled on the table, -// then the corresponding stream on that table goes into the DISABLED state, and -// the stream is automatically deleted after 24 hours. Use the DescribeTable -// action to check the status of the table. +// state, no error is returned. +// +// For global tables, this operation only applies to global tables using Version +// 2019.11.21 (Current version). +// +// DynamoDB might continue to accept data read and write operations, such as +// GetItem and PutItem , on a table in the DELETING state until the table deletion +// is complete. For the full list of table states, see [TableStatus]. +// +// When you delete a table, any indexes on that table are also deleted. +// +// If you have DynamoDB Streams enabled on the table, then the corresponding +// stream on that table goes into the DISABLED state, and the stream is +// automatically deleted after 24 hours. +// +// Use the DescribeTable action to check the status of the table. 
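The DeleteTable documentation above points to DescribeTable for checking the DELETING state. A hedged sketch of pairing the asynchronous delete with the SDK's TableNotExists waiter (which polls DescribeTable) follows; the helper name and five-minute timeout are assumptions.

package example // illustrative sketch only; not part of the vendored SDK

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
)

// deleteTableAndWait issues the asynchronous DeleteTable call and then polls
// DescribeTable via the SDK's TableNotExists waiter until the table is gone.
func deleteTableAndWait(ctx context.Context, client *dynamodb.Client, table string) error {
	if _, err := client.DeleteTable(ctx, &dynamodb.DeleteTableInput{
		TableName: aws.String(table),
	}); err != nil {
		return err
	}
	waiter := dynamodb.NewTableNotExistsWaiter(client)
	return waiter.Wait(ctx, &dynamodb.DescribeTableInput{
		TableName: aws.String(table),
	}, 5*time.Minute)
}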
+// +// [TableStatus]: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_TableDescription.html#DDB-Type-TableDescription-TableStatus func (c *Client) DeleteTable(ctx context.Context, params *DeleteTableInput, optFns ...func(*Options)) (*DeleteTableOutput, error) { if params == nil { params = &DeleteTableInput{} @@ -124,6 +133,12 @@ func (c *Client) addOperationDeleteTableMiddlewares(stack *middleware.Stack, opt if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDeleteTableValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeBackup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeBackup.go index 845ccd7b476..d5a1a308cc3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeBackup.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeBackup.go @@ -12,8 +12,9 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Describes an existing backup of a table. You can call DescribeBackup at a -// maximum rate of 10 times per second. +// Describes an existing backup of a table. +// +// You can call DescribeBackup at a maximum rate of 10 times per second. func (c *Client) DescribeBackup(ctx context.Context, params *DescribeBackupInput, optFns ...func(*Options)) (*DescribeBackupOutput, error) { if params == nil { params = &DescribeBackupInput{} @@ -108,6 +109,12 @@ func (c *Client) addOperationDescribeBackupMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDescribeBackupValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContinuousBackups.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContinuousBackups.go index 2f60a65d3d9..44b6c1c2c62 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContinuousBackups.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContinuousBackups.go @@ -15,12 +15,16 @@ import ( // Checks the status of continuous backups and point in time recovery on the // specified table. Continuous backups are ENABLED on all tables at table // creation. If point in time recovery is enabled, PointInTimeRecoveryStatus will -// be set to ENABLED. After continuous backups and point in time recovery are -// enabled, you can restore to any point in time within EarliestRestorableDateTime -// and LatestRestorableDateTime . LatestRestorableDateTime is typically 5 minutes -// before the current time. You can restore your table to any point in time during -// the last 35 days. You can call DescribeContinuousBackups at a maximum rate of -// 10 times per second. +// be set to ENABLED. +// +// After continuous backups and point in time recovery are enabled, you can +// restore to any point in time within EarliestRestorableDateTime and +// LatestRestorableDateTime . +// +// LatestRestorableDateTime is typically 5 minutes before the current time. You +// can restore your table to any point in time during the last 35 days. 
+// +// You can call DescribeContinuousBackups at a maximum rate of 10 times per second. func (c *Client) DescribeContinuousBackups(ctx context.Context, params *DescribeContinuousBackupsInput, optFns ...func(*Options)) (*DescribeContinuousBackupsOutput, error) { if params == nil { params = &DescribeContinuousBackupsInput{} @@ -39,8 +43,10 @@ func (c *Client) DescribeContinuousBackups(ctx context.Context, params *Describe type DescribeContinuousBackupsInput struct { // Name of the table for which the customer wants to check the continuous backups - // and point in time recovery settings. You can also provide the Amazon Resource - // Name (ARN) of the table in this parameter. + // and point in time recovery settings. + // + // You can also provide the Amazon Resource Name (ARN) of the table in this + // parameter. // // This member is required. TableName *string @@ -118,6 +124,12 @@ func (c *Client) addOperationDescribeContinuousBackupsMiddlewares(stack *middlew if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDescribeContinuousBackupsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContributorInsights.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContributorInsights.go index c983b639659..1dc102910d6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContributorInsights.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContributorInsights.go @@ -51,15 +51,20 @@ type DescribeContributorInsightsOutput struct { // Current status of contributor insights. ContributorInsightsStatus types.ContributorInsightsStatus - // Returns information about the last failure that was encountered. The most - // common exceptions for a FAILED status are: + // Returns information about the last failure that was encountered. + // + // The most common exceptions for a FAILED status are: + // // - LimitExceededException - Per-account Amazon CloudWatch Contributor Insights // rule limit reached. Please disable Contributor Insights for other tables/indexes // OR disable Contributor Insights rules before retrying. + // // - AccessDeniedException - Amazon CloudWatch Contributor Insights rules cannot // be modified due to insufficient permissions. + // // - AccessDeniedException - Failed to create service-linked role for // Contributor Insights due to insufficient permissions. + // // - InternalServerError - Failed to create Amazon CloudWatch Contributor // Insights rules. Please retry request. 
FailureException *types.FailureException @@ -134,6 +139,12 @@ func (c *Client) addOperationDescribeContributorInsightsMiddlewares(stack *middl if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDescribeContributorInsightsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeEndpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeEndpoints.go index b542bfcc65d..f3cb6578f5b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeEndpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeEndpoints.go @@ -12,8 +12,9 @@ import ( ) // Returns the regional endpoint information. For more information on policy -// permissions, please see Internetwork traffic privacy (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/inter-network-traffic-privacy.html#inter-network-traffic-DescribeEndpoints) -// . +// permissions, please see [Internetwork traffic privacy]. +// +// [Internetwork traffic privacy]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/inter-network-traffic-privacy.html#inter-network-traffic-DescribeEndpoints func (c *Client) DescribeEndpoints(ctx context.Context, params *DescribeEndpointsInput, optFns ...func(*Options)) (*DescribeEndpointsOutput, error) { if params == nil { params = &DescribeEndpointsInput{} @@ -101,6 +102,12 @@ func (c *Client) addOperationDescribeEndpointsMiddlewares(stack *middleware.Stac if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeEndpoints(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeExport.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeExport.go index 6bb412cb675..50c046d4f08 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeExport.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeExport.go @@ -103,6 +103,12 @@ func (c *Client) addOperationDescribeExportMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDescribeExportValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeGlobalTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeGlobalTable.go index 541772f651b..5264d3d21e1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeGlobalTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeGlobalTable.go @@ -12,15 +12,19 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns information about the specified global table. 
This operation only -// applies to Version 2017.11.29 (Legacy) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) -// of global tables. We recommend using Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// when creating new global tables, as it provides greater flexibility, higher -// efficiency and consumes less write capacity than 2017.11.29 (Legacy). To -// determine which version you are using, see Determining the version (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html) -// . To update existing global tables from version 2017.11.29 (Legacy) to version -// 2019.11.21 (Current), see Updating global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html) -// . +// Returns information about the specified global table. +// +// This documentation is for version 2017.11.29 (Legacy) of global tables, which +// should be avoided for new global tables. Customers should use [Global Tables version 2019.11.21 (Current)]when possible, +// because it provides greater flexibility, higher efficiency, and consumes less +// write capacity than 2017.11.29 (Legacy). +// +// To determine which version you're using, see [Determining the global table version you are using]. To update existing global tables +// from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see [Upgrading global tables]. +// +// [Global Tables version 2019.11.21 (Current)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html +// [Upgrading global tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html +// [Determining the global table version you are using]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html func (c *Client) DescribeGlobalTable(ctx context.Context, params *DescribeGlobalTableInput, optFns ...func(*Options)) (*DescribeGlobalTableOutput, error) { if params == nil { params = &DescribeGlobalTableInput{} @@ -115,6 +119,12 @@ func (c *Client) addOperationDescribeGlobalTableMiddlewares(stack *middleware.St if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDescribeGlobalTableValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeGlobalTableSettings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeGlobalTableSettings.go index 031d4381235..c0d03e0642d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeGlobalTableSettings.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeGlobalTableSettings.go @@ -12,15 +12,19 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Describes Region-specific settings for a global table. This operation only -// applies to Version 2017.11.29 (Legacy) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) -// of global tables. 
We recommend using Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// when creating new global tables, as it provides greater flexibility, higher -// efficiency and consumes less write capacity than 2017.11.29 (Legacy). To -// determine which version you are using, see Determining the version (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html) -// . To update existing global tables from version 2017.11.29 (Legacy) to version -// 2019.11.21 (Current), see Updating global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html) -// . +// Describes Region-specific settings for a global table. +// +// This documentation is for version 2017.11.29 (Legacy) of global tables, which +// should be avoided for new global tables. Customers should use [Global Tables version 2019.11.21 (Current)]when possible, +// because it provides greater flexibility, higher efficiency, and consumes less +// write capacity than 2017.11.29 (Legacy). +// +// To determine which version you're using, see [Determining the global table version you are using]. To update existing global tables +// from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see [Upgrading global tables]. +// +// [Global Tables version 2019.11.21 (Current)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html +// [Upgrading global tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html +// [Determining the global table version you are using]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html func (c *Client) DescribeGlobalTableSettings(ctx context.Context, params *DescribeGlobalTableSettingsInput, optFns ...func(*Options)) (*DescribeGlobalTableSettingsOutput, error) { if params == nil { params = &DescribeGlobalTableSettingsInput{} @@ -118,6 +122,12 @@ func (c *Client) addOperationDescribeGlobalTableSettingsMiddlewares(stack *middl if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDescribeGlobalTableSettingsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeImport.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeImport.go index 2b3961581a1..d5c038becc8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeImport.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeImport.go @@ -29,7 +29,7 @@ func (c *Client) DescribeImport(ctx context.Context, params *DescribeImportInput type DescribeImportInput struct { - // The Amazon Resource Name (ARN) associated with the table you're importing to. + // The Amazon Resource Name (ARN) associated with the table you're importing to. // // This member is required. ImportArn *string @@ -39,7 +39,7 @@ type DescribeImportInput struct { type DescribeImportOutput struct { - // Represents the properties of the table created for the import, and parameters + // Represents the properties of the table created for the import, and parameters // of the import. The import parameters include import status, how many items were // processed, and how many errors were encountered. 
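
Illustrative sketch (Go, not part of the vendored diff): the DescribeImport input/output documented just above can be exercised with a short status check. The import ARN below is a placeholder; only the ARN of an existing import task is required.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := dynamodb.NewFromConfig(cfg)

	// Placeholder ARN of a previously started import task.
	importArn := "arn:aws:dynamodb:us-east-1:123456789012:table/ImportedOrders/import/placeholder"
	out, err := client.DescribeImport(ctx, &dynamodb.DescribeImportInput{
		ImportArn: aws.String(importArn),
	})
	if err != nil {
		log.Fatal(err)
	}
	// ImportTableDescription carries the import status along with processed-item and error counts.
	fmt.Println("import status:", out.ImportTableDescription.ImportStatus)
}
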
// @@ -107,6 +107,12 @@ func (c *Client) addOperationDescribeImportMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDescribeImportValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeKinesisStreamingDestination.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeKinesisStreamingDestination.go index 1c29302abdd..f56f025991f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeKinesisStreamingDestination.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeKinesisStreamingDestination.go @@ -111,6 +111,12 @@ func (c *Client) addOperationDescribeKinesisStreamingDestinationMiddlewares(stac if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDescribeKinesisStreamingDestinationValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeLimits.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeLimits.go index cd60d4f8de7..a66ff0f1d50 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeLimits.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeLimits.go @@ -13,42 +13,64 @@ import ( // Returns the current provisioned-capacity quotas for your Amazon Web Services // account in a Region, both for the Region as a whole and for any one DynamoDB -// table that you create there. When you establish an Amazon Web Services account, -// the account has initial quotas on the maximum read capacity units and write -// capacity units that you can provision across all of your DynamoDB tables in a -// given Region. Also, there are per-table quotas that apply when you create a -// table there. For more information, see Service, Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) -// page in the Amazon DynamoDB Developer Guide. Although you can increase these -// quotas by filing a case at Amazon Web Services Support Center (https://console.aws.amazon.com/support/home#/) -// , obtaining the increase is not instantaneous. The DescribeLimits action lets -// you write code to compare the capacity you are currently using to those quotas -// imposed by your account so that you have enough time to apply for an increase -// before you hit a quota. For example, you could use one of the Amazon Web -// Services SDKs to do the following: +// table that you create there. +// +// When you establish an Amazon Web Services account, the account has initial +// quotas on the maximum read capacity units and write capacity units that you can +// provision across all of your DynamoDB tables in a given Region. Also, there are +// per-table quotas that apply when you create a table there. For more information, +// see [Service, Account, and Table Quotas]page in the Amazon DynamoDB Developer Guide. +// +// Although you can increase these quotas by filing a case at [Amazon Web Services Support Center], obtaining the +// increase is not instantaneous. 
The DescribeLimits action lets you write code to +// compare the capacity you are currently using to those quotas imposed by your +// account so that you have enough time to apply for an increase before you hit a +// quota. +// +// For example, you could use one of the Amazon Web Services SDKs to do the +// following: +// // - Call DescribeLimits for a particular Region to obtain your current account // quotas on provisioned capacity there. +// // - Create a variable to hold the aggregate read capacity units provisioned for // all your tables in that Region, and one to hold the aggregate write capacity // units. Zero them both. +// // - Call ListTables to obtain a list of all your DynamoDB tables. +// // - For each table name listed by ListTables , do the following: +// // - Call DescribeTable with the table name. +// // - Use the data returned by DescribeTable to add the read capacity units and // write capacity units provisioned for the table itself to your variables. +// // - If the table has one or more global secondary indexes (GSIs), loop over // these GSIs and add their provisioned capacity values to your variables as well. +// // - Report the account quotas for that Region returned by DescribeLimits , along // with the total current provisioned capacity levels you have calculated. // // This will let you see whether you are getting close to your account-level -// quotas. The per-table quotas apply only when you are creating a new table. They +// quotas. +// +// The per-table quotas apply only when you are creating a new table. They // restrict the sum of the provisioned capacity of the new table itself and all its -// global secondary indexes. For existing tables and their GSIs, DynamoDB doesn't -// let you increase provisioned capacity extremely rapidly, but the only quota that -// applies is that the aggregate provisioned capacity over all your tables and GSIs -// cannot exceed either of the per-account quotas. DescribeLimits should only be -// called periodically. You can expect throttling errors if you call it more than -// once in a minute. The DescribeLimits Request element has no content. +// global secondary indexes. +// +// For existing tables and their GSIs, DynamoDB doesn't let you increase +// provisioned capacity extremely rapidly, but the only quota that applies is that +// the aggregate provisioned capacity over all your tables and GSIs cannot exceed +// either of the per-account quotas. +// +// DescribeLimits should only be called periodically. You can expect throttling +// errors if you call it more than once in a minute. +// +// The DescribeLimits Request element has no content. 
+// +// [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html +// [Amazon Web Services Support Center]: https://console.aws.amazon.com/support/home#/ func (c *Client) DescribeLimits(ctx context.Context, params *DescribeLimitsInput, optFns ...func(*Options)) (*DescribeLimitsOutput, error) { if params == nil { params = &DescribeLimitsInput{} @@ -154,6 +176,12 @@ func (c *Client) addOperationDescribeLimitsMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeLimits(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTable.go index 29861d9b443..b89eade6d88 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTable.go @@ -13,18 +13,21 @@ import ( smithytime "github.com/aws/smithy-go/time" smithyhttp "github.com/aws/smithy-go/transport/http" smithywaiter "github.com/aws/smithy-go/waiter" - "github.com/jmespath/go-jmespath" + jmespath "github.com/jmespath/go-jmespath" "time" ) // Returns information about the table, including the current status of the table, -// when it was created, the primary key schema, and any indexes on the table. This -// operation only applies to Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// of global tables. If you issue a DescribeTable request immediately after a -// CreateTable request, DynamoDB might return a ResourceNotFoundException . This is -// because DescribeTable uses an eventually consistent query, and the metadata for -// your table might not be available at that moment. Wait for a few seconds, and -// then try the DescribeTable request again. +// when it was created, the primary key schema, and any indexes on the table. +// +// For global tables, this operation only applies to global tables using Version +// 2019.11.21 (Current version). +// +// If you issue a DescribeTable request immediately after a CreateTable request, +// DynamoDB might return a ResourceNotFoundException . This is because +// DescribeTable uses an eventually consistent query, and the metadata for your +// table might not be available at that moment. Wait for a few seconds, and then +// try the DescribeTable request again. 
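
Illustrative sketch (Go, not part of the vendored diff): the reworked DescribeLimits comment above enumerates a quota-audit procedure — call DescribeLimits, list the tables, describe each one, and sum provisioned capacity for the table and its GSIs. A minimal version of that loop, assuming only default credentials and whatever tables the account holds, might look like this; note the comment's caveat that DescribeLimits should be called only periodically.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := dynamodb.NewFromConfig(cfg)

	// Account- and table-level quotas for the current Region.
	limits, err := client.DescribeLimits(ctx, &dynamodb.DescribeLimitsInput{})
	if err != nil {
		log.Fatal(err)
	}

	var readUsed, writeUsed int64
	pager := dynamodb.NewListTablesPaginator(client, &dynamodb.ListTablesInput{})
	for pager.HasMorePages() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			log.Fatal(err)
		}
		for _, name := range page.TableNames {
			desc, err := client.DescribeTable(ctx, &dynamodb.DescribeTableInput{TableName: aws.String(name)})
			if err != nil {
				log.Fatal(err)
			}
			// Add the table's own provisioned capacity, then any GSI capacity.
			if pt := desc.Table.ProvisionedThroughput; pt != nil {
				readUsed += aws.ToInt64(pt.ReadCapacityUnits)
				writeUsed += aws.ToInt64(pt.WriteCapacityUnits)
			}
			for _, gsi := range desc.Table.GlobalSecondaryIndexes {
				if pt := gsi.ProvisionedThroughput; pt != nil {
					readUsed += aws.ToInt64(pt.ReadCapacityUnits)
					writeUsed += aws.ToInt64(pt.WriteCapacityUnits)
				}
			}
		}
	}

	fmt.Printf("provisioned RCU %d of %d, WCU %d of %d (account quotas)\n",
		readUsed, aws.ToInt64(limits.AccountMaxReadCapacityUnits),
		writeUsed, aws.ToInt64(limits.AccountMaxWriteCapacityUnits))
}
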
func (c *Client) DescribeTable(ctx context.Context, params *DescribeTableInput, optFns ...func(*Options)) (*DescribeTableOutput, error) { if params == nil { params = &DescribeTableInput{} @@ -122,6 +125,12 @@ func (c *Client) addOperationDescribeTableMiddlewares(stack *middleware.Stack, o if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDescribeTableValidationMiddleware(stack); err != nil { return err } @@ -152,56 +161,6 @@ func (c *Client) addOperationDescribeTableMiddlewares(stack *middleware.Stack, o return nil } -func addOpDescribeTableDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { - return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ - Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ - func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { - opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS - opt.Logger = o.Logger - }, - }, - DiscoverOperation: c.fetchOpDescribeTableDiscoverEndpoint, - EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, - EndpointDiscoveryRequired: false, - Region: o.Region, - }, "ResolveEndpointV2", middleware.After) -} - -func (c *Client) fetchOpDescribeTableDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { - input := getOperationInput(ctx) - in, ok := input.(*DescribeTableInput) - if !ok { - return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) - } - _ = in - - identifierMap := make(map[string]string, 0) - identifierMap["sdk#Region"] = region - - key := fmt.Sprintf("DynamoDB.%v", identifierMap) - - if v, ok := c.endpointCache.Get(key); ok { - return v, nil - } - - discoveryOperationInput := &DescribeEndpointsInput{} - - opt := internalEndpointDiscovery.DiscoverEndpointOptions{} - for _, fn := range optFns { - fn(&opt) - } - - go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) - return internalEndpointDiscovery.WeightedAddress{}, nil -} - -// DescribeTableAPIClient is a client that implements the DescribeTable operation. -type DescribeTableAPIClient interface { - DescribeTable(context.Context, *DescribeTableInput, ...func(*Options)) (*DescribeTableOutput, error) -} - -var _ DescribeTableAPIClient = (*Client)(nil) - // TableExistsWaiterOptions are waiter options for TableExistsWaiter type TableExistsWaiterOptions struct { @@ -234,12 +193,13 @@ type TableExistsWaiterOptions struct { // Retryable is function that can be used to override the service defined // waiter-behavior based on operation output, or returned error. This function is - // used by the waiter to decide if a state is retryable or a terminal state. By - // default service-modeled logic will populate this option. This option can thus be - // used to define a custom waiter state with fall-back to service-modeled waiter - // state mutators.The function returns an error in case of a failure state. In case - // of retry state, this function returns a bool value of true and nil error, while - // in case of success it returns a bool value of false and nil error. + // used by the waiter to decide if a state is retryable or a terminal state. 
+ // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. + // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. Retryable func(context.Context, *DescribeTableInput, *DescribeTableOutput, error) (bool, error) } @@ -315,7 +275,13 @@ func (w *TableExistsWaiter) WaitForOutput(ctx context.Context, params *DescribeT } out, err := w.client.DescribeTable(ctx, params, func(o *Options) { + baseOpts := []func(*Options){ + addIsWaiterUserAgent, + } o.APIOptions = append(o.APIOptions, apiOptions...) + for _, opt := range baseOpts { + opt(o) + } for _, opt := range options.ClientOptions { opt(o) } @@ -412,12 +378,13 @@ type TableNotExistsWaiterOptions struct { // Retryable is function that can be used to override the service defined // waiter-behavior based on operation output, or returned error. This function is - // used by the waiter to decide if a state is retryable or a terminal state. By - // default service-modeled logic will populate this option. This option can thus be - // used to define a custom waiter state with fall-back to service-modeled waiter - // state mutators.The function returns an error in case of a failure state. In case - // of retry state, this function returns a bool value of true and nil error, while - // in case of success it returns a bool value of false and nil error. + // used by the waiter to decide if a state is retryable or a terminal state. + // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. + // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. Retryable func(context.Context, *DescribeTableInput, *DescribeTableOutput, error) (bool, error) } @@ -494,7 +461,13 @@ func (w *TableNotExistsWaiter) WaitForOutput(ctx context.Context, params *Descri } out, err := w.client.DescribeTable(ctx, params, func(o *Options) { + baseOpts := []func(*Options){ + addIsWaiterUserAgent, + } o.APIOptions = append(o.APIOptions, apiOptions...) 
+ for _, opt := range baseOpts { + opt(o) + } for _, opt := range options.ClientOptions { opt(o) } @@ -542,6 +515,56 @@ func tableNotExistsStateRetryable(ctx context.Context, input *DescribeTableInput return true, nil } +func addOpDescribeTableDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpDescribeTableDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpDescribeTableDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*DescribeTableInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +// DescribeTableAPIClient is a client that implements the DescribeTable operation. +type DescribeTableAPIClient interface { + DescribeTable(context.Context, *DescribeTableInput, ...func(*Options)) (*DescribeTableOutput, error) +} + +var _ DescribeTableAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opDescribeTable(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTableReplicaAutoScaling.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTableReplicaAutoScaling.go index 0008f9d1e83..3eee5ea572d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTableReplicaAutoScaling.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTableReplicaAutoScaling.go @@ -12,8 +12,9 @@ import ( ) // Describes auto scaling settings across replicas of the global table at once. -// This operation only applies to Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// of global tables. +// +// For global tables, this operation only applies to global tables using Version +// 2019.11.21 (Current version). 
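
Illustrative sketch (Go, not part of the vendored diff): the TableExistsWaiter plumbing reworked above (the relocated endpoint-discovery helpers and the waiter user-agent option) is consumed through the public waiter API. The table name and the five-minute bound are placeholders.

package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := dynamodb.NewFromConfig(cfg)

	// Poll DescribeTable until the table is ACTIVE, or give up after 5 minutes.
	waiter := dynamodb.NewTableExistsWaiter(client)
	err = waiter.Wait(ctx,
		&dynamodb.DescribeTableInput{TableName: aws.String("my-table")}, // placeholder table name
		5*time.Minute)
	if err != nil {
		log.Fatal("table never became ACTIVE: ", err)
	}
	log.Println("table is ready")
}
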
func (c *Client) DescribeTableReplicaAutoScaling(ctx context.Context, params *DescribeTableReplicaAutoScalingInput, optFns ...func(*Options)) (*DescribeTableReplicaAutoScalingOutput, error) { if params == nil { params = &DescribeTableReplicaAutoScalingInput{} @@ -106,6 +107,12 @@ func (c *Client) addOperationDescribeTableReplicaAutoScalingMiddlewares(stack *m if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDescribeTableReplicaAutoScalingValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTimeToLive.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTimeToLive.go index 3845703de28..d3368ee57da 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTimeToLive.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTimeToLive.go @@ -108,6 +108,12 @@ func (c *Client) addOperationDescribeTimeToLiveMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDescribeTimeToLiveValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DisableKinesisStreamingDestination.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DisableKinesisStreamingDestination.go index 1d72751c58f..e8bd9dff6d9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DisableKinesisStreamingDestination.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DisableKinesisStreamingDestination.go @@ -126,6 +126,12 @@ func (c *Client) addOperationDisableKinesisStreamingDestinationMiddlewares(stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDisableKinesisStreamingDestinationValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_EnableKinesisStreamingDestination.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_EnableKinesisStreamingDestination.go index 7d0ff0d25de..a63c4cdaa8c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_EnableKinesisStreamingDestination.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_EnableKinesisStreamingDestination.go @@ -128,6 +128,12 @@ func (c *Client) addOperationEnableKinesisStreamingDestinationMiddlewares(stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpEnableKinesisStreamingDestinationValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExecuteStatement.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExecuteStatement.go index df979dfe5cf..b1a7ba049d9 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExecuteStatement.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExecuteStatement.go @@ -12,12 +12,15 @@ import ( ) // This operation allows you to perform reads and singleton writes on data stored -// in DynamoDB, using PartiQL. For PartiQL reads ( SELECT statement), if the total -// number of processed items exceeds the maximum dataset size limit of 1 MB, the -// read stops and results are returned to the user as a LastEvaluatedKey value to -// continue the read in a subsequent operation. If the filter criteria in WHERE -// clause does not match any data, the read will return an empty result set. A -// single SELECT statement response can return up to the maximum number of items +// in DynamoDB, using PartiQL. +// +// For PartiQL reads ( SELECT statement), if the total number of processed items +// exceeds the maximum dataset size limit of 1 MB, the read stops and results are +// returned to the user as a LastEvaluatedKey value to continue the read in a +// subsequent operation. If the filter criteria in WHERE clause does not match any +// data, the read will return an empty result set. +// +// A single SELECT statement response can return up to the maximum number of items // (if using the Limit parameter) or a maximum of 1 MB of data (and then apply any // filtering to the results using WHERE clause). If LastEvaluatedKey is present in // the response, you need to paginate the result set. If NextToken is present, you @@ -67,20 +70,27 @@ type ExecuteStatementInput struct { // Determines the level of detail about either provisioned or on-demand throughput // consumption that is returned in the response: + // // - INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary index - // that was accessed. Note that some operations, such as GetItem and BatchGetItem - // , do not access any indexes at all. In these cases, specifying INDEXES will - // only return ConsumedCapacity information for table(s). + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem , do not access any + // indexes at all. In these cases, specifying INDEXES will only return + // ConsumedCapacity information for table(s). + // // - TOTAL - The response includes only the aggregate ConsumedCapacity for the // operation. + // // - NONE - No ConsumedCapacity details are included in the response. ReturnConsumedCapacity types.ReturnConsumedCapacity // An optional parameter that returns the item attributes for an ExecuteStatement - // operation that failed a condition check. There is no additional cost associated - // with requesting a return value aside from the small network and processing - // overhead of receiving a larger response. No read capacity units are consumed. + // operation that failed a condition check. + // + // There is no additional cost associated with requesting a return value aside + // from the small network and processing overhead of receiving a larger response. + // No read capacity units are consumed. ReturnValuesOnConditionCheckFailure types.ReturnValuesOnConditionCheckFailure noSmithyDocumentSerde @@ -91,8 +101,10 @@ type ExecuteStatementOutput struct { // The capacity units consumed by an operation. The data returned includes the // total provisioned throughput consumed, along with statistics for the table and // any indexes involved in the operation. 
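
Illustrative sketch (Go, not part of the vendored diff): the ExecuteStatement comment above describes PartiQL SELECT pagination via LastEvaluatedKey/NextToken. A minimal loop that follows NextToken until the result set is exhausted could look like this; the "Music" table and the Artist value are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := dynamodb.NewFromConfig(cfg)

	var next *string
	for {
		out, err := client.ExecuteStatement(ctx, &dynamodb.ExecuteStatementInput{
			Statement:  aws.String(`SELECT * FROM "Music" WHERE Artist = ?`), // placeholder table and attribute
			Parameters: []types.AttributeValue{&types.AttributeValueMemberS{Value: "No One You Know"}},
			NextToken:  next, // nil on the first call
		})
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("got %d items in this page\n", len(out.Items))
		if out.NextToken == nil {
			break // no more pages; an empty Items slice simply means nothing matched
		}
		next = out.NextToken
	}
}
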
ConsumedCapacity is only returned if the - // request asked for it. For more information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) - // in the Amazon DynamoDB Developer Guide. + // request asked for it. For more information, see [Provisioned capacity mode]in the Amazon DynamoDB + // Developer Guide. + // + // [Provisioned capacity mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html ConsumedCapacity *types.ConsumedCapacity // If a read operation was used, this property will contain the result of the read @@ -175,6 +187,12 @@ func (c *Client) addOperationExecuteStatementMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpExecuteStatementValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExecuteTransaction.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExecuteTransaction.go index 44b71fbcb97..89d8d043e82 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExecuteTransaction.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExecuteTransaction.go @@ -12,12 +12,14 @@ import ( ) // This operation allows you to perform transactional reads or writes on data -// stored in DynamoDB, using PartiQL. The entire transaction must consist of either -// read statements or write statements, you cannot mix both in one transaction. The -// EXISTS function is an exception and can be used to check the condition of -// specific attributes of the item in a similar manner to ConditionCheck in the -// TransactWriteItems (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/transaction-apis.html#transaction-apis-txwriteitems) -// API. +// stored in DynamoDB, using PartiQL. +// +// The entire transaction must consist of either read statements or write +// statements, you cannot mix both in one transaction. The EXISTS function is an +// exception and can be used to check the condition of specific attributes of the +// item in a similar manner to ConditionCheck in the [TransactWriteItems] API. +// +// [TransactWriteItems]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/transaction-apis.html#transaction-apis-txwriteitems func (c *Client) ExecuteTransaction(ctx context.Context, params *ExecuteTransactionInput, optFns ...func(*Options)) (*ExecuteTransactionOutput, error) { if params == nil { params = &ExecuteTransactionInput{} @@ -45,10 +47,10 @@ type ExecuteTransactionInput struct { ClientRequestToken *string // Determines the level of detail about either provisioned or on-demand throughput - // consumption that is returned in the response. For more information, see - // TransactGetItems (https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_TransactGetItems.html) - // and TransactWriteItems (https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_TransactWriteItems.html) - // . + // consumption that is returned in the response. For more information, see [TransactGetItems]and [TransactWriteItems]. 
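
Illustrative sketch (Go, not part of the vendored diff): the ExecuteTransaction comment above notes that a transaction must be all reads or all writes, with EXISTS as the condition-check exception. A hedged example under that reading, with a placeholder "Orders" table, key, and PartiQL statements:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := dynamodb.NewFromConfig(cfg)

	// EXISTS acts like ConditionCheck in TransactWriteItems; the UPDATE only runs if the
	// order exists. Table name, key, and attribute names are placeholders.
	_, err = client.ExecuteTransaction(ctx, &dynamodb.ExecuteTransactionInput{
		TransactStatements: []types.ParameterizedStatement{
			{
				Statement:  aws.String(`EXISTS(SELECT * FROM "Orders" WHERE OrderId = ?)`),
				Parameters: []types.AttributeValue{&types.AttributeValueMemberS{Value: "order-123"}},
			},
			{
				Statement: aws.String(`UPDATE "Orders" SET OrderStatus = ? WHERE OrderId = ?`),
				Parameters: []types.AttributeValue{
					&types.AttributeValueMemberS{Value: "SHIPPED"},
					&types.AttributeValueMemberS{Value: "order-123"},
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
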
+ // + // [TransactWriteItems]: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_TransactWriteItems.html + // [TransactGetItems]: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_TransactGetItems.html ReturnConsumedCapacity types.ReturnConsumedCapacity noSmithyDocumentSerde @@ -124,6 +126,12 @@ func (c *Client) addOperationExecuteTransactionMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opExecuteTransactionMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExportTableToPointInTime.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExportTableToPointInTime.go index 297f6de3674..62becd46ef5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExportTableToPointInTime.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExportTableToPointInTime.go @@ -44,12 +44,15 @@ type ExportTableToPointInTimeInput struct { // Providing a ClientToken makes the call to ExportTableToPointInTimeInput // idempotent, meaning that multiple identical calls have the same effect as one - // single call. A client token is valid for 8 hours after the first request that - // uses it is completed. After 8 hours, any request with the same client token is - // treated as a new request. Do not resubmit the same request with the same client - // token for more than 8 hours, or the result might not be idempotent. If you - // submit a request with the same client token but a change in other parameters - // within the 8-hour idempotency window, DynamoDB returns an + // single call. + // + // A client token is valid for 8 hours after the first request that uses it is + // completed. After 8 hours, any request with the same client token is treated as a + // new request. Do not resubmit the same request with the same client token for + // more than 8 hours, or the result might not be idempotent. + // + // If you submit a request with the same client token but a change in other + // parameters within the 8-hour idempotency window, DynamoDB returns an // ImportConflictException . ClientToken *string @@ -72,8 +75,10 @@ type ExportTableToPointInTimeInput struct { IncrementalExportSpecification *types.IncrementalExportSpecification // The ID of the Amazon Web Services account that owns the bucket the export will - // be stored in. S3BucketOwner is a required parameter when exporting to a S3 - // bucket in another account. + // be stored in. + // + // S3BucketOwner is a required parameter when exporting to a S3 bucket in another + // account. S3BucketOwner *string // The Amazon S3 bucket prefix to use as the file name and path of the exported @@ -82,7 +87,9 @@ type ExportTableToPointInTimeInput struct { // Type of encryption used on the bucket where export data will be stored. 
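
Illustrative sketch (Go, not part of the vendored diff): ExportTableToPointInTime with the 8-hour ClientToken idempotency window described above; the S3SseAlgorithm choices are listed in the doc text that continues just after this sketch. Table ARN, bucket, prefix, and token are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := dynamodb.NewFromConfig(cfg)

	out, err := client.ExportTableToPointInTime(ctx, &dynamodb.ExportTableToPointInTimeInput{
		TableArn: aws.String("arn:aws:dynamodb:us-east-1:123456789012:table/Music"), // placeholder
		S3Bucket: aws.String("my-export-bucket"),                                    // placeholder
		S3Prefix: aws.String("exports/music/"),
		// Reusing the same token on retries within the 8-hour window keeps the call idempotent.
		ClientToken:    aws.String("music-export-2024-08-12"),
		ExportFormat:   types.ExportFormatDynamodbJson,
		S3SseAlgorithm: types.S3SseAlgorithmAes256, // or types.S3SseAlgorithmKms
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("export started:", aws.ToString(out.ExportDescription.ExportArn))
}
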
Valid // values for S3SseAlgorithm are: + // // - AES256 - server-side encryption with Amazon S3 managed keys + // // - KMS - server-side encryption with KMS managed keys S3SseAlgorithm types.S3SseAlgorithm @@ -159,6 +166,12 @@ func (c *Client) addOperationExportTableToPointInTimeMiddlewares(stack *middlewa if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opExportTableToPointInTimeMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetItem.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetItem.go index 6d5ef8eca93..e8ccb81cb7a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetItem.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetItem.go @@ -14,11 +14,12 @@ import ( // The GetItem operation returns a set of attributes for the item with the given // primary key. If there is no matching item, GetItem does not return any data and -// there will be no Item element in the response. GetItem provides an eventually -// consistent read by default. If your application requires a strongly consistent -// read, set ConsistentRead to true . Although a strongly consistent read might -// take more time than an eventually consistent read, it always returns the last -// updated value. +// there will be no Item element in the response. +// +// GetItem provides an eventually consistent read by default. If your application +// requires a strongly consistent read, set ConsistentRead to true . Although a +// strongly consistent read might take more time than an eventually consistent +// read, it always returns the last updated value. func (c *Client) GetItem(ctx context.Context, params *GetItemInput, optFns ...func(*Options)) (*GetItemOutput, error) { if params == nil { params = &GetItemInput{} @@ -38,10 +39,12 @@ func (c *Client) GetItem(ctx context.Context, params *GetItemInput, optFns ...fu type GetItemInput struct { // A map of attribute names to AttributeValue objects, representing the primary - // key of the item to retrieve. For the primary key, you must provide all of the - // attributes. For example, with a simple primary key, you only need to provide a - // value for the partition key. For a composite primary key, you must provide - // values for both the partition key and the sort key. + // key of the item to retrieve. + // + // For the primary key, you must provide all of the attributes. For example, with + // a simple primary key, you only need to provide a value for the partition key. + // For a composite primary key, you must provide values for both the partition key + // and the sort key. // // This member is required. Key map[string]types.AttributeValue @@ -53,8 +56,9 @@ type GetItemInput struct { TableName *string // This is a legacy parameter. Use ProjectionExpression instead. For more - // information, see AttributesToGet (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html) - // in the Amazon DynamoDB Developer Guide. + // information, see [AttributesToGet]in the Amazon DynamoDB Developer Guide. 
+ // + // [AttributesToGet]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html AttributesToGet []string // Determines the read consistency model: If set to true , then the operation uses @@ -64,46 +68,67 @@ type GetItemInput struct { // One or more substitution tokens for attribute names in an expression. The // following are some use cases for using ExpressionAttributeNames : + // // - To access an attribute whose name conflicts with a DynamoDB reserved word. + // // - To create a placeholder for repeating occurrences of an attribute name in // an expression. + // // - To prevent special characters in an attribute name from being // misinterpreted in an expression. + // // Use the # character in an expression to dereference an attribute name. For // example, consider the following attribute name: + // // - Percentile + // // The name of this attribute conflicts with a reserved word, so it cannot be used - // directly in an expression. (For the complete list of reserved words, see - // Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) - // in the Amazon DynamoDB Developer Guide). To work around this, you could specify - // the following for ExpressionAttributeNames : + // directly in an expression. (For the complete list of reserved words, see [Reserved Words]in the + // Amazon DynamoDB Developer Guide). To work around this, you could specify the + // following for ExpressionAttributeNames : + // // - {"#P":"Percentile"} + // // You could then use this substitution in an expression, as in this example: + // // - #P = :val + // // Tokens that begin with the : character are expression attribute values, which - // are placeholders for the actual value at runtime. For more information on - // expression attribute names, see Specifying Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. + // are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see [Specifying Item Attributes] in the Amazon DynamoDB + // Developer Guide. + // + // [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html + // [Specifying Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html ExpressionAttributeNames map[string]string // A string that identifies one or more attributes to retrieve from the table. // These attributes can include scalars, sets, or elements of a JSON document. The - // attributes in the expression must be separated by commas. If no attribute names - // are specified, then all attributes are returned. If any of the requested - // attributes are not found, they do not appear in the result. For more - // information, see Specifying Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. + // attributes in the expression must be separated by commas. + // + // If no attribute names are specified, then all attributes are returned. If any + // of the requested attributes are not found, they do not appear in the result. + // + // For more information, see [Specifying Item Attributes] in the Amazon DynamoDB Developer Guide. 
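
Illustrative sketch (Go, not part of the vendored diff): a GetItem call that mirrors the #P / Percentile aliasing described in the ExpressionAttributeNames comment above, with a ProjectionExpression and a strongly consistent read. The "Music" table and its key values are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := dynamodb.NewFromConfig(cfg)

	out, err := client.GetItem(ctx, &dynamodb.GetItemInput{
		TableName: aws.String("Music"), // placeholder table with a composite primary key
		Key: map[string]types.AttributeValue{
			"Artist":    &types.AttributeValueMemberS{Value: "No One You Know"},
			"SongTitle": &types.AttributeValueMemberS{Value: "Call Me Today"},
		},
		// Percentile is a reserved word, so alias it exactly as the comment above describes.
		ExpressionAttributeNames: map[string]string{"#P": "Percentile"},
		ProjectionExpression:     aws.String("#P, SongTitle"),
		ConsistentRead:           aws.Bool(true), // strongly consistent read
	})
	if err != nil {
		log.Fatal(err)
	}
	if out.Item == nil {
		fmt.Println("no matching item")
		return
	}
	fmt.Println("item:", out.Item)
}
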
+ // + // [Specifying Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html ProjectionExpression *string // Determines the level of detail about either provisioned or on-demand throughput // consumption that is returned in the response: + // // - INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary index - // that was accessed. Note that some operations, such as GetItem and BatchGetItem - // , do not access any indexes at all. In these cases, specifying INDEXES will - // only return ConsumedCapacity information for table(s). + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem , do not access any + // indexes at all. In these cases, specifying INDEXES will only return + // ConsumedCapacity information for table(s). + // // - TOTAL - The response includes only the aggregate ConsumedCapacity for the // operation. + // // - NONE - No ConsumedCapacity details are included in the response. ReturnConsumedCapacity types.ReturnConsumedCapacity @@ -117,8 +142,9 @@ type GetItemOutput struct { // includes the total provisioned throughput consumed, along with statistics for // the table and any indexes involved in the operation. ConsumedCapacity is only // returned if the ReturnConsumedCapacity parameter was specified. For more - // information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html#ItemSizeCalculations.Reads) - // in the Amazon DynamoDB Developer Guide. + // information, see [Capacity unit consumption for read operations]in the Amazon DynamoDB Developer Guide. + // + // [Capacity unit consumption for read operations]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#read-operation-consumption ConsumedCapacity *types.ConsumedCapacity // A map of attribute names to AttributeValue objects, as specified by @@ -189,6 +215,12 @@ func (c *Client) addOperationGetItemMiddlewares(stack *middleware.Stack, options if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpGetItemValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetResourcePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetResourcePolicy.go index baadeb90f2e..9bec60243ef 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetResourcePolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetResourcePolicy.go @@ -12,28 +12,35 @@ import ( ) // Returns the resource-based policy document attached to the resource, which can -// be a table or stream, in JSON format. GetResourcePolicy follows an eventually -// consistent (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html) -// model. The following list describes the outcomes when you issue the -// GetResourcePolicy request immediately after issuing another request: +// be a table or stream, in JSON format. +// +// GetResourcePolicy follows an [eventually consistent] model. 
The following list describes the outcomes +// when you issue the GetResourcePolicy request immediately after issuing another +// request: +// // - If you issue a GetResourcePolicy request immediately after a // PutResourcePolicy request, DynamoDB might return a PolicyNotFoundException . +// // - If you issue a GetResourcePolicy request immediately after a // DeleteResourcePolicy request, DynamoDB might return the policy that was // present before the deletion request. +// // - If you issue a GetResourcePolicy request immediately after a CreateTable // request, which includes a resource-based policy, DynamoDB might return a // ResourceNotFoundException or a PolicyNotFoundException . // // Because GetResourcePolicy uses an eventually consistent query, the metadata for // your policy or table might not be available at that moment. Wait for a few -// seconds, and then retry the GetResourcePolicy request. After a GetResourcePolicy -// request returns a policy created using the PutResourcePolicy request, you can -// assume the policy will start getting applied in the authorization of requests to -// the resource. Because this process is eventually consistent, it will take some -// time to apply the policy to all requests to a resource. Policies that you attach -// while creating a table using the CreateTable request will always be applied to -// all requests for that table. +// seconds, and then retry the GetResourcePolicy request. +// +// After a GetResourcePolicy request returns a policy created using the +// PutResourcePolicy request, the policy will be applied in the authorization of +// requests to the resource. Because this process is eventually consistent, it will +// take some time to apply the policy to all requests to a resource. Policies that +// you attach while creating a table using the CreateTable request will always be +// applied to all requests for that table. +// +// [eventually consistent]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html func (c *Client) GetResourcePolicy(ctx context.Context, params *GetResourcePolicyInput, optFns ...func(*Options)) (*GetResourcePolicyOutput, error) { if params == nil { params = &GetResourcePolicyInput{} @@ -66,7 +73,7 @@ type GetResourcePolicyOutput struct { // table or stream, in JSON format. Policy *string - // A unique string that represents the revision ID of the policy. If you are + // A unique string that represents the revision ID of the policy. If you're // comparing revision IDs, make sure to always use string comparison logic. 
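
Illustrative sketch (Go, not part of the vendored diff): because GetResourcePolicy is eventually consistent as described above, a caller may need to retry on PolicyNotFoundException shortly after PutResourcePolicy. The resource ARN, retry count, and backoff are placeholders.

package main

import (
	"context"
	"errors"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := dynamodb.NewFromConfig(cfg)

	resourceArn := "arn:aws:dynamodb:us-east-1:123456789012:table/Music" // placeholder
	for attempt := 1; attempt <= 5; attempt++ {
		out, err := client.GetResourcePolicy(ctx, &dynamodb.GetResourcePolicyInput{
			ResourceArn: aws.String(resourceArn),
		})
		var notFound *types.PolicyNotFoundException
		if errors.As(err, &notFound) {
			// Metadata may not be visible yet; wait a few seconds and retry, per the doc above.
			time.Sleep(2 * time.Second)
			continue
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("revision:", aws.ToString(out.RevisionId))
		fmt.Println("policy:", aws.ToString(out.Policy))
		return
	}
	log.Fatal("policy still not visible after retries")
}
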
RevisionId *string @@ -134,6 +141,12 @@ func (c *Client) addOperationGetResourcePolicyMiddlewares(stack *middleware.Stac if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpGetResourcePolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ImportTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ImportTable.go index fd134a68b46..f0e2a4310ec 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ImportTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ImportTable.go @@ -29,13 +29,13 @@ func (c *Client) ImportTable(ctx context.Context, params *ImportTableInput, optF type ImportTableInput struct { - // The format of the source data. Valid values for ImportFormat are CSV , + // The format of the source data. Valid values for ImportFormat are CSV , // DYNAMODB_JSON or ION . // // This member is required. InputFormat types.InputFormat - // The S3 bucket that provides the source for the import. + // The S3 bucket that provides the source for the import. // // This member is required. S3BucketSource *types.S3BucketSource @@ -46,19 +46,22 @@ type ImportTableInput struct { TableCreationParameters *types.TableCreationParameters // Providing a ClientToken makes the call to ImportTableInput idempotent, meaning - // that multiple identical calls have the same effect as one single call. A client - // token is valid for 8 hours after the first request that uses it is completed. - // After 8 hours, any request with the same client token is treated as a new - // request. Do not resubmit the same request with the same client token for more - // than 8 hours, or the result might not be idempotent. If you submit a request - // with the same client token but a change in other parameters within the 8-hour - // idempotency window, DynamoDB returns an IdempotentParameterMismatch exception. + // that multiple identical calls have the same effect as one single call. + // + // A client token is valid for 8 hours after the first request that uses it is + // completed. After 8 hours, any request with the same client token is treated as a + // new request. Do not resubmit the same request with the same client token for + // more than 8 hours, or the result might not be idempotent. + // + // If you submit a request with the same client token but a change in other + // parameters within the 8-hour idempotency window, DynamoDB returns an + // IdempotentParameterMismatch exception. ClientToken *string - // Type of compression to be used on the input coming from the imported table. + // Type of compression to be used on the input coming from the imported table. InputCompressionType types.InputCompressionType - // Additional properties that specify how the input is formatted, + // Additional properties that specify how the input is formatted, InputFormatOptions *types.InputFormatOptions noSmithyDocumentSerde @@ -66,7 +69,7 @@ type ImportTableInput struct { type ImportTableOutput struct { - // Represents the properties of the table created for the import, and parameters + // Represents the properties of the table created for the import, and parameters // of the import. The import parameters include import status, how many items were // processed, and how many errors were encountered. 
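
Illustrative sketch (Go, not part of the vendored diff): an ImportTable call using the ClientToken idempotency window and the table-creation parameters documented above. Bucket, token, table name, and key schema are placeholders assuming a single-attribute CSV key.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := dynamodb.NewFromConfig(cfg)

	out, err := client.ImportTable(ctx, &dynamodb.ImportTableInput{
		InputFormat:          types.InputFormatCsv,
		InputCompressionType: types.InputCompressionTypeGzip,
		S3BucketSource:       &types.S3BucketSource{S3Bucket: aws.String("my-import-bucket")}, // placeholder
		// Reuse the same token on retries within the 8-hour window described above.
		ClientToken: aws.String("orders-import-2024-08-12"),
		TableCreationParameters: &types.TableCreationParameters{
			TableName:   aws.String("ImportedOrders"), // placeholder; the import creates this table
			BillingMode: types.BillingModePayPerRequest,
			AttributeDefinitions: []types.AttributeDefinition{
				{AttributeName: aws.String("OrderId"), AttributeType: types.ScalarAttributeTypeS},
			},
			KeySchema: []types.KeySchemaElement{
				{AttributeName: aws.String("OrderId"), KeyType: types.KeyTypeHash},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("import status:", out.ImportTableDescription.ImportStatus)
}
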
// @@ -134,6 +137,12 @@ func (c *Client) addOperationImportTableMiddlewares(stack *middleware.Stack, opt if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opImportTableMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListBackups.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListBackups.go index 50a5ca3f9d9..85c4e3d9e71 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListBackups.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListBackups.go @@ -17,12 +17,17 @@ import ( // and weren't made with Amazon Web Services Backup. To list these backups for a // given table, specify TableName . ListBackups returns a paginated list of // results with at most 1 MB worth of items in a page. You can also specify a -// maximum number of entries to be returned in a page. In the request, start time -// is inclusive, but end time is exclusive. Note that these boundaries are for the -// time at which the original backup was requested. You can call ListBackups a -// maximum of five times per second. If you want to retrieve the complete list of -// backups made with Amazon Web Services Backup, use the Amazon Web Services -// Backup list API. (https://docs.aws.amazon.com/aws-backup/latest/devguide/API_ListBackupJobs.html) +// maximum number of entries to be returned in a page. +// +// In the request, start time is inclusive, but end time is exclusive. Note that +// these boundaries are for the time at which the original backup was requested. +// +// You can call ListBackups a maximum of five times per second. +// +// If you want to retrieve the complete list of backups made with Amazon Web +// Services Backup, use the [Amazon Web Services Backup list API.] +// +// [Amazon Web Services Backup list API.]: https://docs.aws.amazon.com/aws-backup/latest/devguide/API_ListBackupJobs.html func (c *Client) ListBackups(ctx context.Context, params *ListBackupsInput, optFns ...func(*Options)) (*ListBackupsOutput, error) { if params == nil { params = &ListBackupsInput{} @@ -40,11 +45,15 @@ func (c *Client) ListBackups(ctx context.Context, params *ListBackupsInput, optF type ListBackupsInput struct { - // The backups from the table specified by BackupType are listed. Where BackupType - // can be: + // The backups from the table specified by BackupType are listed. + // + // Where BackupType can be: + // // - USER - On-demand backup created by you. (The default setting if no other // backup types are specified.) + // // - SYSTEM - On-demand backup automatically created by DynamoDB. + // // - ALL - All types of on-demand backups (USER and SYSTEM). BackupType types.BackupTypeFilter @@ -78,14 +87,17 @@ type ListBackupsOutput struct { // List of BackupSummary objects. BackupSummaries []types.BackupSummary - // The ARN of the backup last evaluated when the current page of results was + // The ARN of the backup last evaluated when the current page of results was // returned, inclusive of the current page of results. This value may be specified // as the ExclusiveStartBackupArn of a new ListBackups operation in order to fetch - // the next page of results. 
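
Illustrative sketch (Go, not part of the vendored diff): manual ListBackups pagination following the LastEvaluatedBackupArn contract described in the surrounding comment rework. The table name and page size are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := dynamodb.NewFromConfig(cfg)

	var start *string
	for {
		out, err := client.ListBackups(ctx, &dynamodb.ListBackupsInput{
			TableName:               aws.String("Music"), // placeholder
			BackupType:              types.BackupTypeFilterUser,
			Limit:                   aws.Int32(25),
			ExclusiveStartBackupArn: start, // nil on the first call
		})
		if err != nil {
			log.Fatal(err)
		}
		for _, b := range out.BackupSummaries {
			fmt.Println(aws.ToString(b.BackupArn), b.BackupStatus)
		}
		// An empty LastEvaluatedBackupArn means the last page has been processed.
		if out.LastEvaluatedBackupArn == nil {
			break
		}
		start = out.LastEvaluatedBackupArn
	}
}
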
If LastEvaluatedBackupArn is empty, then the last - // page of results has been processed and there are no more results to be - // retrieved. If LastEvaluatedBackupArn is not empty, this may or may not indicate - // that there is more data to be returned. All results are guaranteed to have been - // returned if and only if no value for LastEvaluatedBackupArn is returned. + // the next page of results. + // + // If LastEvaluatedBackupArn is empty, then the last page of results has been + // processed and there are no more results to be retrieved. + // + // If LastEvaluatedBackupArn is not empty, this may or may not indicate that there + // is more data to be returned. All results are guaranteed to have been returned if + // and only if no value for LastEvaluatedBackupArn is returned. LastEvaluatedBackupArn *string // Metadata pertaining to the operation's result. @@ -152,6 +164,12 @@ func (c *Client) addOperationListBackupsMiddlewares(stack *middleware.Stack, opt if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBackups(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListContributorInsights.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListContributorInsights.go index da9825d50bc..98d42348efb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListContributorInsights.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListContributorInsights.go @@ -112,6 +112,12 @@ func (c *Client) addOperationListContributorInsightsMiddlewares(stack *middlewar if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListContributorInsights(options.Region), middleware.Before); err != nil { return err } @@ -139,14 +145,6 @@ func (c *Client) addOperationListContributorInsightsMiddlewares(stack *middlewar return nil } -// ListContributorInsightsAPIClient is a client that implements the -// ListContributorInsights operation. -type ListContributorInsightsAPIClient interface { - ListContributorInsights(context.Context, *ListContributorInsightsInput, ...func(*Options)) (*ListContributorInsightsOutput, error) -} - -var _ ListContributorInsightsAPIClient = (*Client)(nil) - // ListContributorInsightsPaginatorOptions is the paginator options for // ListContributorInsights type ListContributorInsightsPaginatorOptions struct { @@ -208,6 +206,9 @@ func (p *ListContributorInsightsPaginator) NextPage(ctx context.Context, optFns params.MaxResults = p.options.Limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListContributorInsights(ctx, ¶ms, optFns...) if err != nil { return nil, err @@ -227,6 +228,14 @@ func (p *ListContributorInsightsPaginator) NextPage(ctx context.Context, optFns return result, nil } +// ListContributorInsightsAPIClient is a client that implements the +// ListContributorInsights operation. 
+type ListContributorInsightsAPIClient interface { + ListContributorInsights(context.Context, *ListContributorInsightsInput, ...func(*Options)) (*ListContributorInsightsOutput, error) +} + +var _ ListContributorInsightsAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListContributorInsights(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListExports.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListExports.go index 5bf0880441c..703fde9214f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListExports.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListExports.go @@ -113,6 +113,12 @@ func (c *Client) addOperationListExportsMiddlewares(stack *middleware.Stack, opt if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListExports(options.Region), middleware.Before); err != nil { return err } @@ -140,13 +146,6 @@ func (c *Client) addOperationListExportsMiddlewares(stack *middleware.Stack, opt return nil } -// ListExportsAPIClient is a client that implements the ListExports operation. -type ListExportsAPIClient interface { - ListExports(context.Context, *ListExportsInput, ...func(*Options)) (*ListExportsOutput, error) -} - -var _ ListExportsAPIClient = (*Client)(nil) - // ListExportsPaginatorOptions is the paginator options for ListExports type ListExportsPaginatorOptions struct { // Maximum number of results to return per page. @@ -210,6 +209,9 @@ func (p *ListExportsPaginator) NextPage(ctx context.Context, optFns ...func(*Opt } params.MaxResults = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListExports(ctx, ¶ms, optFns...) if err != nil { return nil, err @@ -229,6 +231,13 @@ func (p *ListExportsPaginator) NextPage(ctx context.Context, optFns ...func(*Opt return result, nil } +// ListExportsAPIClient is a client that implements the ListExports operation. +type ListExportsAPIClient interface { + ListExports(context.Context, *ListExportsInput, ...func(*Options)) (*ListExportsOutput, error) +} + +var _ ListExportsAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListExports(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListGlobalTables.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListGlobalTables.go index 75fb71ef6f7..910b20a6f0d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListGlobalTables.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListGlobalTables.go @@ -12,15 +12,19 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Lists all global tables that have a replica in the specified Region. This -// operation only applies to Version 2017.11.29 (Legacy) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) -// of global tables. 
We recommend using Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// when creating new global tables, as it provides greater flexibility, higher -// efficiency and consumes less write capacity than 2017.11.29 (Legacy). To -// determine which version you are using, see Determining the version (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html) -// . To update existing global tables from version 2017.11.29 (Legacy) to version -// 2019.11.21 (Current), see Updating global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html) -// . +// Lists all global tables that have a replica in the specified Region. +// +// This documentation is for version 2017.11.29 (Legacy) of global tables, which +// should be avoided for new global tables. Customers should use [Global Tables version 2019.11.21 (Current)]when possible, +// because it provides greater flexibility, higher efficiency, and consumes less +// write capacity than 2017.11.29 (Legacy). +// +// To determine which version you're using, see [Determining the global table version you are using]. To update existing global tables +// from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see [Upgrading global tables]. +// +// [Global Tables version 2019.11.21 (Current)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html +// [Upgrading global tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html +// [Determining the global table version you are using]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html func (c *Client) ListGlobalTables(ctx context.Context, params *ListGlobalTablesInput, optFns ...func(*Options)) (*ListGlobalTablesOutput, error) { if params == nil { params = &ListGlobalTablesInput{} @@ -42,10 +46,12 @@ type ListGlobalTablesInput struct { ExclusiveStartGlobalTableName *string // The maximum number of table names to return, if the parameter is not specified - // DynamoDB defaults to 100. If the number of global tables DynamoDB finds reaches - // this limit, it stops the operation and returns the table names collected up to - // that point, with a table name in the LastEvaluatedGlobalTableName to apply in a - // subsequent operation to the ExclusiveStartGlobalTableName parameter. + // DynamoDB defaults to 100. + // + // If the number of global tables DynamoDB finds reaches this limit, it stops the + // operation and returns the table names collected up to that point, with a table + // name in the LastEvaluatedGlobalTableName to apply in a subsequent operation to + // the ExclusiveStartGlobalTableName parameter. Limit *int32 // Lists the global tables in a specific Region. 
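The ListBackups documentation reflowed above spells out a manual pagination contract: the caller feeds LastEvaluatedBackupArn back in as ExclusiveStartBackupArn, and all results are guaranteed to have been returned if and only if LastEvaluatedBackupArn comes back empty. The following is a minimal caller-side sketch of that loop, not part of this patch, assuming default credentials and a hypothetical "orders" table:

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/dynamodb"
        "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
    )

    func main() {
        ctx := context.Background()
        cfg, err := config.LoadDefaultConfig(ctx)
        if err != nil {
            log.Fatal(err)
        }
        client := dynamodb.NewFromConfig(cfg)

        input := &dynamodb.ListBackupsInput{
            TableName:  aws.String("orders"), // hypothetical table name
            BackupType: types.BackupTypeFilterAll,
            Limit:      aws.Int32(25),
        }
        for {
            out, err := client.ListBackups(ctx, input)
            if err != nil {
                log.Fatal(err)
            }
            for _, b := range out.BackupSummaries {
                fmt.Println(aws.ToString(b.BackupArn))
            }
            // Per the contract above, results are complete if and only if
            // LastEvaluatedBackupArn is empty.
            if out.LastEvaluatedBackupArn == nil {
                break
            }
            input.ExclusiveStartBackupArn = out.LastEvaluatedBackupArn
        }
    }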
@@ -126,6 +132,12 @@ func (c *Client) addOperationListGlobalTablesMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListGlobalTables(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListImports.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListImports.go index 7d4e34fd649..9a169326445 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListImports.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListImports.go @@ -29,15 +29,15 @@ func (c *Client) ListImports(ctx context.Context, params *ListImportsInput, optF type ListImportsInput struct { - // An optional string that, if supplied, must be copied from the output of a + // An optional string that, if supplied, must be copied from the output of a // previous call to ListImports . When provided in this manner, the API fetches the // next page of results. NextToken *string - // The number of ImportSummary objects returned in a single page. + // The number of ImportSummary objects returned in a single page. PageSize *int32 - // The Amazon Resource Name (ARN) associated with the table that was imported to. + // The Amazon Resource Name (ARN) associated with the table that was imported to. TableArn *string noSmithyDocumentSerde @@ -45,10 +45,10 @@ type ListImportsInput struct { type ListImportsOutput struct { - // A list of ImportSummary objects. + // A list of ImportSummary objects. ImportSummaryList []types.ImportSummary - // If this value is returned, there are additional results to be displayed. To + // If this value is returned, there are additional results to be displayed. To // retrieve them, call ListImports again, with NextToken set to this value. NextToken *string @@ -113,6 +113,12 @@ func (c *Client) addOperationListImportsMiddlewares(stack *middleware.Stack, opt if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListImports(options.Region), middleware.Before); err != nil { return err } @@ -140,16 +146,9 @@ func (c *Client) addOperationListImportsMiddlewares(stack *middleware.Stack, opt return nil } -// ListImportsAPIClient is a client that implements the ListImports operation. -type ListImportsAPIClient interface { - ListImports(context.Context, *ListImportsInput, ...func(*Options)) (*ListImportsOutput, error) -} - -var _ ListImportsAPIClient = (*Client)(nil) - // ListImportsPaginatorOptions is the paginator options for ListImports type ListImportsPaginatorOptions struct { - // The number of ImportSummary objects returned in a single page. + // The number of ImportSummary objects returned in a single page. Limit int32 // Set to true if pagination should stop if the service returns a pagination token @@ -210,6 +209,9 @@ func (p *ListImportsPaginator) NextPage(ctx context.Context, optFns ...func(*Opt } params.PageSize = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListImports(ctx, ¶ms, optFns...) 
if err != nil { return nil, err @@ -229,6 +231,13 @@ func (p *ListImportsPaginator) NextPage(ctx context.Context, optFns ...func(*Opt return result, nil } +// ListImportsAPIClient is a client that implements the ListImports operation. +type ListImportsAPIClient interface { + ListImports(context.Context, *ListImportsInput, ...func(*Options)) (*ListImportsOutput, error) +} + +var _ ListImportsAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListImports(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListTables.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListTables.go index f59311d6006..70912ae72ba 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListTables.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListTables.go @@ -49,16 +49,18 @@ type ListTablesOutput struct { // The name of the last table in the current page of results. Use this value as // the ExclusiveStartTableName in a new request to obtain the next page of - // results, until all the table names are returned. If you do not receive a - // LastEvaluatedTableName value in the response, this means that there are no more - // table names to be retrieved. + // results, until all the table names are returned. + // + // If you do not receive a LastEvaluatedTableName value in the response, this + // means that there are no more table names to be retrieved. LastEvaluatedTableName *string // The names of the tables associated with the current account at the current - // endpoint. The maximum size of this array is 100. If LastEvaluatedTableName also - // appears in the output, you can use this value as the ExclusiveStartTableName - // parameter in a subsequent ListTables request and obtain the next page of - // results. + // endpoint. The maximum size of this array is 100. + // + // If LastEvaluatedTableName also appears in the output, you can use this value as + // the ExclusiveStartTableName parameter in a subsequent ListTables request and + // obtain the next page of results. TableNames []string // Metadata pertaining to the operation's result. 
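The generated paginators touched in this diff (ListContributorInsightsPaginator, ListExportsPaginator, ListImportsPaginator, ListTablesPaginator) wrap the ExclusiveStart*/LastEvaluated* handshake and, after this change, prepend addIsPaginatorUserAgent to each page call. A minimal sketch of consuming ListTables through its paginator, not part of this patch and assuming default credentials, might look like:

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/dynamodb"
    )

    func main() {
        ctx := context.Background()
        cfg, err := config.LoadDefaultConfig(ctx)
        if err != nil {
            log.Fatal(err)
        }
        client := dynamodb.NewFromConfig(cfg)

        // The paginator accepts any ListTablesAPIClient and handles
        // LastEvaluatedTableName internally.
        p := dynamodb.NewListTablesPaginator(client, &dynamodb.ListTablesInput{
            Limit: aws.Int32(100), // at most 100 table names per page
        })
        for p.HasMorePages() {
            page, err := p.NextPage(ctx)
            if err != nil {
                log.Fatal(err)
            }
            for _, name := range page.TableNames {
                fmt.Println(name)
            }
        }
    }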
@@ -125,6 +127,12 @@ func (c *Client) addOperationListTablesMiddlewares(stack *middleware.Stack, opti if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListTables(options.Region), middleware.Before); err != nil { return err } @@ -152,56 +160,6 @@ func (c *Client) addOperationListTablesMiddlewares(stack *middleware.Stack, opti return nil } -func addOpListTablesDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { - return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ - Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ - func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { - opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS - opt.Logger = o.Logger - }, - }, - DiscoverOperation: c.fetchOpListTablesDiscoverEndpoint, - EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, - EndpointDiscoveryRequired: false, - Region: o.Region, - }, "ResolveEndpointV2", middleware.After) -} - -func (c *Client) fetchOpListTablesDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { - input := getOperationInput(ctx) - in, ok := input.(*ListTablesInput) - if !ok { - return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) - } - _ = in - - identifierMap := make(map[string]string, 0) - identifierMap["sdk#Region"] = region - - key := fmt.Sprintf("DynamoDB.%v", identifierMap) - - if v, ok := c.endpointCache.Get(key); ok { - return v, nil - } - - discoveryOperationInput := &DescribeEndpointsInput{} - - opt := internalEndpointDiscovery.DiscoverEndpointOptions{} - for _, fn := range optFns { - fn(&opt) - } - - go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) - return internalEndpointDiscovery.WeightedAddress{}, nil -} - -// ListTablesAPIClient is a client that implements the ListTables operation. -type ListTablesAPIClient interface { - ListTables(context.Context, *ListTablesInput, ...func(*Options)) (*ListTablesOutput, error) -} - -var _ ListTablesAPIClient = (*Client)(nil) - // ListTablesPaginatorOptions is the paginator options for ListTables type ListTablesPaginatorOptions struct { // A maximum number of table names to return. If this parameter is not specified, @@ -266,6 +224,9 @@ func (p *ListTablesPaginator) NextPage(ctx context.Context, optFns ...func(*Opti } params.Limit = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListTables(ctx, ¶ms, optFns...) 
if err != nil { return nil, err @@ -285,6 +246,56 @@ func (p *ListTablesPaginator) NextPage(ctx context.Context, optFns ...func(*Opti return result, nil } +func addOpListTablesDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpListTablesDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpListTablesDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*ListTablesInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +// ListTablesAPIClient is a client that implements the ListTables operation. +type ListTablesAPIClient interface { + ListTables(context.Context, *ListTablesInput, ...func(*Options)) (*ListTablesOutput, error) +} + +var _ ListTablesAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListTables(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListTagsOfResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListTagsOfResource.go index 8f604bb024d..801b277f768 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListTagsOfResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListTagsOfResource.go @@ -13,9 +13,12 @@ import ( ) // List all tags on an Amazon DynamoDB resource. You can call ListTagsOfResource -// up to 10 times per second, per account. For an overview on tagging DynamoDB -// resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html) -// in the Amazon DynamoDB Developer Guide. +// up to 10 times per second, per account. +// +// For an overview on tagging DynamoDB resources, see [Tagging for DynamoDB] in the Amazon DynamoDB +// Developer Guide. 
+// +// [Tagging for DynamoDB]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html func (c *Client) ListTagsOfResource(ctx context.Context, params *ListTagsOfResourceInput, optFns ...func(*Options)) (*ListTagsOfResourceOutput, error) { if params == nil { params = &ListTagsOfResourceInput{} @@ -120,6 +123,12 @@ func (c *Client) addOperationListTagsOfResourceMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpListTagsOfResourceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutItem.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutItem.go index 39336006bd7..ff2d36cad88 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutItem.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutItem.go @@ -18,18 +18,28 @@ import ( // conditional put operation (add a new item if one with the specified primary key // doesn't exist), or replace an existing item if it has certain attribute values. // You can return the item's attribute values in the same operation, using the -// ReturnValues parameter. When you add an item, the primary key attributes are the -// only required attributes. Empty String and Binary attribute values are allowed. -// Attribute values of type String and Binary must have a length greater than zero -// if the attribute is used as a key attribute for a table or index. Set type -// attributes cannot be empty. Invalid Requests with empty values will be rejected -// with a ValidationException exception. To prevent a new item from replacing an -// existing item, use a conditional expression that contains the -// attribute_not_exists function with the name of the attribute being used as the -// partition key for the table. Since every record must contain that attribute, the -// attribute_not_exists function will only succeed if no matching item exists. For -// more information about PutItem , see Working with Items (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithItems.html) -// in the Amazon DynamoDB Developer Guide. +// ReturnValues parameter. +// +// When you add an item, the primary key attributes are the only required +// attributes. +// +// Empty String and Binary attribute values are allowed. Attribute values of type +// String and Binary must have a length greater than zero if the attribute is used +// as a key attribute for a table or index. Set type attributes cannot be empty. +// +// Invalid Requests with empty values will be rejected with a ValidationException +// exception. +// +// To prevent a new item from replacing an existing item, use a conditional +// expression that contains the attribute_not_exists function with the name of the +// attribute being used as the partition key for the table. Since every record must +// contain that attribute, the attribute_not_exists function will only succeed if +// no matching item exists. +// +// For more information about PutItem , see [Working with Items] in the Amazon DynamoDB Developer +// Guide. 
+// +// [Working with Items]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithItems.html func (c *Client) PutItem(ctx context.Context, params *PutItemInput, optFns ...func(*Options)) (*PutItemOutput, error) { if params == nil { params = &PutItemInput{} @@ -50,18 +60,27 @@ type PutItemInput struct { // A map of attribute name/value pairs, one for each attribute. Only the primary // key attributes are required; you can optionally provide other attribute - // name-value pairs for the item. You must provide all of the attributes for the - // primary key. For example, with a simple primary key, you only need to provide a - // value for the partition key. For a composite primary key, you must provide both - // values for both the partition key and the sort key. If you specify any - // attributes that are part of an index key, then the data types for those - // attributes must match those of the schema in the table's attribute definition. + // name-value pairs for the item. + // + // You must provide all of the attributes for the primary key. For example, with a + // simple primary key, you only need to provide a value for the partition key. For + // a composite primary key, you must provide both values for both the partition key + // and the sort key. + // + // If you specify any attributes that are part of an index key, then the data + // types for those attributes must match those of the schema in the table's + // attribute definition. + // // Empty String and Binary attribute values are allowed. Attribute values of type // String and Binary must have a length greater than zero if the attribute is used - // as a key attribute for a table or index. For more information about primary - // keys, see Primary Key (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.CoreComponents.html#HowItWorks.CoreComponents.PrimaryKey) - // in the Amazon DynamoDB Developer Guide. Each element in the Item map is an - // AttributeValue object. + // as a key attribute for a table or index. + // + // For more information about primary keys, see [Primary Key] in the Amazon DynamoDB Developer + // Guide. + // + // Each element in the Item map is an AttributeValue object. + // + // [Primary Key]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.CoreComponents.html#HowItWorks.CoreComponents.PrimaryKey // // This member is required. Item map[string]types.AttributeValue @@ -73,70 +92,111 @@ type PutItemInput struct { TableName *string // A condition that must be satisfied in order for a conditional PutItem operation - // to succeed. An expression can contain any of the following: + // to succeed. + // + // An expression can contain any of the following: + // // - Functions: attribute_exists | attribute_not_exists | attribute_type | - // contains | begins_with | size These function names are case-sensitive. + // contains | begins_with | size + // + // These function names are case-sensitive. + // // - Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN + // // - Logical operators: AND | OR | NOT - // For more information on condition expressions, see Condition Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) - // in the Amazon DynamoDB Developer Guide. + // + // For more information on condition expressions, see [Condition Expressions] in the Amazon DynamoDB + // Developer Guide. 
+ // + // [Condition Expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html ConditionExpression *string // This is a legacy parameter. Use ConditionExpression instead. For more - // information, see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html) - // in the Amazon DynamoDB Developer Guide. + // information, see [ConditionalOperator]in the Amazon DynamoDB Developer Guide. + // + // [ConditionalOperator]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html ConditionalOperator types.ConditionalOperator // This is a legacy parameter. Use ConditionExpression instead. For more - // information, see Expected (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html) - // in the Amazon DynamoDB Developer Guide. + // information, see [Expected]in the Amazon DynamoDB Developer Guide. + // + // [Expected]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html Expected map[string]types.ExpectedAttributeValue // One or more substitution tokens for attribute names in an expression. The // following are some use cases for using ExpressionAttributeNames : + // // - To access an attribute whose name conflicts with a DynamoDB reserved word. + // // - To create a placeholder for repeating occurrences of an attribute name in // an expression. + // // - To prevent special characters in an attribute name from being // misinterpreted in an expression. + // // Use the # character in an expression to dereference an attribute name. For // example, consider the following attribute name: + // // - Percentile + // // The name of this attribute conflicts with a reserved word, so it cannot be used - // directly in an expression. (For the complete list of reserved words, see - // Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) - // in the Amazon DynamoDB Developer Guide). To work around this, you could specify - // the following for ExpressionAttributeNames : + // directly in an expression. (For the complete list of reserved words, see [Reserved Words]in the + // Amazon DynamoDB Developer Guide). To work around this, you could specify the + // following for ExpressionAttributeNames : + // // - {"#P":"Percentile"} + // // You could then use this substitution in an expression, as in this example: + // // - #P = :val + // // Tokens that begin with the : character are expression attribute values, which - // are placeholders for the actual value at runtime. For more information on - // expression attribute names, see Specifying Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. + // are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see [Specifying Item Attributes] in the Amazon DynamoDB + // Developer Guide. + // + // [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html + // [Specifying Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html ExpressionAttributeNames map[string]string - // One or more values that can be substituted in an expression. 
Use the : (colon) - // character in an expression to dereference an attribute value. For example, - // suppose that you wanted to check whether the value of the ProductStatus - // attribute was one of the following: Available | Backordered | Discontinued You - // would first need to specify ExpressionAttributeValues as follows: { - // ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, - // ":disc":{"S":"Discontinued"} } You could then use these values in an expression, - // such as this: ProductStatus IN (:avail, :back, :disc) For more information on - // expression attribute values, see Condition Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) - // in the Amazon DynamoDB Developer Guide. + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute value. + // For example, suppose that you wanted to check whether the value of the + // ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, + // ":disc":{"S":"Discontinued"} } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see [Condition Expressions] in the Amazon + // DynamoDB Developer Guide. + // + // [Condition Expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html ExpressionAttributeValues map[string]types.AttributeValue // Determines the level of detail about either provisioned or on-demand throughput // consumption that is returned in the response: + // // - INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary index - // that was accessed. Note that some operations, such as GetItem and BatchGetItem - // , do not access any indexes at all. In these cases, specifying INDEXES will - // only return ConsumedCapacity information for table(s). + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem , do not access any + // indexes at all. In these cases, specifying INDEXES will only return + // ConsumedCapacity information for table(s). + // // - TOTAL - The response includes only the aggregate ConsumedCapacity for the // operation. + // // - NONE - No ConsumedCapacity details are included in the response. ReturnConsumedCapacity types.ReturnConsumedCapacity @@ -148,21 +208,29 @@ type PutItemInput struct { // Use ReturnValues if you want to get the item attributes as they appeared before // they were updated with the PutItem request. For PutItem , the valid values are: + // // - NONE - If ReturnValues is not specified, or if its value is NONE , then // nothing is returned. (This setting is the default for ReturnValues .) + // // - ALL_OLD - If PutItem overwrote an attribute name-value pair, then the // content of the old item is returned. - // The values returned are strongly consistent. There is no additional cost - // associated with requesting a return value aside from the small network and - // processing overhead of receiving a larger response. No read capacity units are - // consumed. 
The ReturnValues parameter is used by several DynamoDB operations; - // however, PutItem does not recognize any values other than NONE or ALL_OLD . + // + // The values returned are strongly consistent. + // + // There is no additional cost associated with requesting a return value aside + // from the small network and processing overhead of receiving a larger response. + // No read capacity units are consumed. + // + // The ReturnValues parameter is used by several DynamoDB operations; however, + // PutItem does not recognize any values other than NONE or ALL_OLD . ReturnValues types.ReturnValue // An optional parameter that returns the item attributes for a PutItem operation - // that failed a condition check. There is no additional cost associated with - // requesting a return value aside from the small network and processing overhead - // of receiving a larger response. No read capacity units are consumed. + // that failed a condition check. + // + // There is no additional cost associated with requesting a return value aside + // from the small network and processing overhead of receiving a larger response. + // No read capacity units are consumed. ReturnValuesOnConditionCheckFailure types.ReturnValuesOnConditionCheckFailure noSmithyDocumentSerde @@ -180,24 +248,30 @@ type PutItemOutput struct { // includes the total provisioned throughput consumed, along with statistics for // the table and any indexes involved in the operation. ConsumedCapacity is only // returned if the ReturnConsumedCapacity parameter was specified. For more - // information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) - // in the Amazon DynamoDB Developer Guide. + // information, see [Capacity unity consumption for write operations]in the Amazon DynamoDB Developer Guide. + // + // [Capacity unity consumption for write operations]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#write-operation-consumption ConsumedCapacity *types.ConsumedCapacity // Information about item collections, if any, that were affected by the PutItem // operation. ItemCollectionMetrics is only returned if the // ReturnItemCollectionMetrics parameter was specified. If the table does not have // any local secondary indexes, this information is not returned in the response. + // // Each ItemCollectionMetrics element consists of: + // // - ItemCollectionKey - The partition key value of the item collection. This is // the same as the partition key value of the item itself. + // // - SizeEstimateRangeGB - An estimate of item collection size, in gigabytes. // This value is a two-element array containing a lower bound and an upper bound // for the estimate. The estimate includes the size of all the items in the table, // plus the size of all attributes projected into all of the local secondary // indexes on that table. Use this estimate to measure whether a local secondary - // index is approaching its size limit. The estimate is subject to change over - // time; therefore, do not rely on the precision or accuracy of the estimate. + // index is approaching its size limit. + // + // The estimate is subject to change over time; therefore, do not rely on the + // precision or accuracy of the estimate. ItemCollectionMetrics *types.ItemCollectionMetrics // Metadata pertaining to the operation's result. 
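The PutItem documentation above recommends preventing overwrites with a condition expression built on attribute_not_exists over the partition key attribute. A minimal sketch of such a conditional put, not part of this patch, with hypothetical table and attribute names and treating ConditionalCheckFailedException as "item already exists", might look like:

    package main

    import (
        "context"
        "errors"
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/dynamodb"
        "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
    )

    func main() {
        ctx := context.Background()
        cfg, err := config.LoadDefaultConfig(ctx)
        if err != nil {
            log.Fatal(err)
        }
        client := dynamodb.NewFromConfig(cfg)

        _, err = client.PutItem(ctx, &dynamodb.PutItemInput{
            TableName: aws.String("orders"), // hypothetical table
            Item: map[string]types.AttributeValue{
                "pk":     &types.AttributeValueMemberS{Value: "order#123"},
                "status": &types.AttributeValueMemberS{Value: "Available"},
            },
            // Succeeds only if no item with this partition key exists yet.
            ConditionExpression: aws.String("attribute_not_exists(pk)"),
        })
        var ccf *types.ConditionalCheckFailedException
        if errors.As(err, &ccf) {
            fmt.Println("item already exists; not overwritten")
        } else if err != nil {
            log.Fatal(err)
        }
    }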
@@ -264,6 +338,12 @@ func (c *Client) addOperationPutItemMiddlewares(stack *middleware.Stack, options if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpPutItemValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutResourcePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutResourcePolicy.go index 064a2682052..2beb0496542 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutResourcePolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutResourcePolicy.go @@ -13,17 +13,21 @@ import ( // Attaches a resource-based policy document to the resource, which can be a table // or stream. When you attach a resource-based policy using this API, the policy -// application is eventually consistent (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html) -// . PutResourcePolicy is an idempotent operation; running it multiple times on -// the same resource using the same policy document will return the same revision -// ID. If you specify an ExpectedRevisionId which doesn't match the current -// policy's RevisionId , the PolicyNotFoundException will be returned. +// application is [eventually consistent]. +// +// PutResourcePolicy is an idempotent operation; running it multiple times on the +// same resource using the same policy document will return the same revision ID. +// If you specify an ExpectedRevisionId that doesn't match the current policy's +// RevisionId , the PolicyNotFoundException will be returned. +// // PutResourcePolicy is an asynchronous operation. If you issue a GetResourcePolicy // request immediately after a PutResourcePolicy request, DynamoDB might return // your previous policy, if there was one, or return the PolicyNotFoundException . // This is because GetResourcePolicy uses an eventually consistent query, and the // metadata for your policy or table might not be available at that moment. Wait // for a few seconds, and then try the GetResourcePolicy request again. +// +// [eventually consistent]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html func (c *Client) PutResourcePolicy(ctx context.Context, params *PutResourcePolicyInput, optFns ...func(*Options)) (*PutResourcePolicyOutput, error) { if params == nil { params = &PutResourcePolicyInput{} @@ -41,24 +45,33 @@ func (c *Client) PutResourcePolicy(ctx context.Context, params *PutResourcePolic type PutResourcePolicyInput struct { - // An Amazon Web Services resource-based policy document in JSON format. The - // maximum size supported for a resource-based policy document is 20 KB. DynamoDB - // counts whitespaces when calculating the size of a policy against this limit. For - // a full list of all considerations that you should keep in mind while attaching a - // resource-based policy, see Resource-based policy considerations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-considerations.html) - // . + // An Amazon Web Services resource-based policy document in JSON format. + // + // - The maximum size supported for a resource-based policy document is 20 KB. + // DynamoDB counts whitespaces when calculating the size of a policy against this + // limit. 
+ // + // - Within a resource-based policy, if the action for a DynamoDB service-linked + // role (SLR) to replicate data for a global table is denied, adding or deleting a + // replica will fail with an error. + // + // For a full list of all considerations that apply while attaching a + // resource-based policy, see [Resource-based policy considerations]. + // + // [Resource-based policy considerations]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-considerations.html // // This member is required. Policy *string // The Amazon Resource Name (ARN) of the DynamoDB resource to which the policy - // will be attached. The resources you can specify include tables and streams. You - // can control index permissions using the base table's policy. To specify the same - // permission level for your table and its indexes, you can provide both the table - // and index Amazon Resource Name (ARN)s in the Resource field of a given Statement - // in your policy document. Alternatively, to specify different permissions for - // your table, indexes, or both, you can define multiple Statement fields in your - // policy document. + // will be attached. The resources you can specify include tables and streams. + // + // You can control index permissions using the base table's policy. To specify the + // same permission level for your table and its indexes, you can provide both the + // table and index Amazon Resource Name (ARN)s in the Resource field of a given + // Statement in your policy document. Alternatively, to specify different + // permissions for your table, indexes, or both, you can define multiple Statement + // fields in your policy document. // // This member is required. ResourceArn *string @@ -69,11 +82,14 @@ type PutResourcePolicyInput struct { // A string value that you can use to conditionally update your policy. You can // provide the revision ID of your existing policy to make mutating requests - // against that policy. When you provide an expected revision ID, if the revision - // ID of the existing policy on the resource doesn't match or if there's no policy - // attached to the resource, your request will be rejected with a - // PolicyNotFoundException . To conditionally put a policy when no policy exists - // for the resource, specify NO_POLICY for the revision ID. + // against that policy. + // + // When you provide an expected revision ID, if the revision ID of the existing + // policy on the resource doesn't match or if there's no policy attached to the + // resource, your request will be rejected with a PolicyNotFoundException . + // + // To conditionally attach a policy when no policy exists for the resource, + // specify NO_POLICY for the revision ID. ExpectedRevisionId *string noSmithyDocumentSerde @@ -81,7 +97,7 @@ type PutResourcePolicyInput struct { type PutResourcePolicyOutput struct { - // A unique string that represents the revision ID of the policy. If you are + // A unique string that represents the revision ID of the policy. If you're // comparing revision IDs, make sure to always use string comparison logic. 
RevisionId *string @@ -149,6 +165,12 @@ func (c *Client) addOperationPutResourcePolicyMiddlewares(stack *middleware.Stac if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpPutResourcePolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Query.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Query.go index 83805f8d8f1..517efdced03 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Query.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Query.go @@ -15,40 +15,53 @@ import ( // You must provide the name of the partition key attribute and a single value for // that attribute. Query returns all items with that partition key value. // Optionally, you can provide a sort key attribute and use a comparison operator -// to refine the search results. Use the KeyConditionExpression parameter to -// provide a specific value for the partition key. The Query operation will return -// all of the items from the table or index with that partition key value. You can -// optionally narrow the scope of the Query operation by specifying a sort key -// value and a comparison operator in KeyConditionExpression . To further refine -// the Query results, you can optionally provide a FilterExpression . A -// FilterExpression determines which items within the results should be returned to -// you. All of the other results are discarded. A Query operation always returns a -// result set. If no matching items are found, the result set will be empty. -// Queries that do not return results consume the minimum number of read capacity -// units for that type of read operation. DynamoDB calculates the number of read -// capacity units consumed based on item size, not on the amount of data that is -// returned to an application. The number of capacity units consumed will be the -// same whether you request all of the attributes (the default behavior) or just -// some of them (using a projection expression). The number will also be the same -// whether or not you use a FilterExpression . Query results are always sorted by -// the sort key value. If the data type of the sort key is Number, the results are -// returned in numeric order; otherwise, the results are returned in order of UTF-8 -// bytes. By default, the sort order is ascending. To reverse the order, set the -// ScanIndexForward parameter to false. A single Query operation will read up to -// the maximum number of items set (if using the Limit parameter) or a maximum of -// 1 MB of data and then apply any filtering to the results using FilterExpression -// . If LastEvaluatedKey is present in the response, you will need to paginate the -// result set. For more information, see Paginating the Results (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.html#Query.Pagination) -// in the Amazon DynamoDB Developer Guide. FilterExpression is applied after a -// Query finishes, but before the results are returned. A FilterExpression cannot -// contain partition key or sort key attributes. You need to specify those -// attributes in the KeyConditionExpression . A Query operation can return an -// empty result set and a LastEvaluatedKey if all the items read for the page of -// results are filtered out. 
You can query a table, a local secondary index, or a -// global secondary index. For a query on a table or on a local secondary index, -// you can set the ConsistentRead parameter to true and obtain a strongly -// consistent result. Global secondary indexes support eventually consistent reads -// only, so do not specify ConsistentRead when querying a global secondary index. +// to refine the search results. +// +// Use the KeyConditionExpression parameter to provide a specific value for the +// partition key. The Query operation will return all of the items from the table +// or index with that partition key value. You can optionally narrow the scope of +// the Query operation by specifying a sort key value and a comparison operator in +// KeyConditionExpression . To further refine the Query results, you can +// optionally provide a FilterExpression . A FilterExpression determines which +// items within the results should be returned to you. All of the other results are +// discarded. +// +// A Query operation always returns a result set. If no matching items are found, +// the result set will be empty. Queries that do not return results consume the +// minimum number of read capacity units for that type of read operation. +// +// DynamoDB calculates the number of read capacity units consumed based on item +// size, not on the amount of data that is returned to an application. The number +// of capacity units consumed will be the same whether you request all of the +// attributes (the default behavior) or just some of them (using a projection +// expression). The number will also be the same whether or not you use a +// FilterExpression . +// +// Query results are always sorted by the sort key value. If the data type of the +// sort key is Number, the results are returned in numeric order; otherwise, the +// results are returned in order of UTF-8 bytes. By default, the sort order is +// ascending. To reverse the order, set the ScanIndexForward parameter to false. +// +// A single Query operation will read up to the maximum number of items set (if +// using the Limit parameter) or a maximum of 1 MB of data and then apply any +// filtering to the results using FilterExpression . If LastEvaluatedKey is +// present in the response, you will need to paginate the result set. For more +// information, see [Paginating the Results]in the Amazon DynamoDB Developer Guide. +// +// FilterExpression is applied after a Query finishes, but before the results are +// returned. A FilterExpression cannot contain partition key or sort key +// attributes. You need to specify those attributes in the KeyConditionExpression . +// +// A Query operation can return an empty result set and a LastEvaluatedKey if all +// the items read for the page of results are filtered out. +// +// You can query a table, a local secondary index, or a global secondary index. +// For a query on a table or on a local secondary index, you can set the +// ConsistentRead parameter to true and obtain a strongly consistent result. +// Global secondary indexes support eventually consistent reads only, so do not +// specify ConsistentRead when querying a global secondary index. 
+// +// [Paginating the Results]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.html#Query.Pagination func (c *Client) Query(ctx context.Context, params *QueryInput, optFns ...func(*Options)) (*QueryOutput, error) { if params == nil { params = &QueryInput{} @@ -74,72 +87,106 @@ type QueryInput struct { TableName *string // This is a legacy parameter. Use ProjectionExpression instead. For more - // information, see AttributesToGet (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html) - // in the Amazon DynamoDB Developer Guide. + // information, see [AttributesToGet]in the Amazon DynamoDB Developer Guide. + // + // [AttributesToGet]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html AttributesToGet []string // This is a legacy parameter. Use FilterExpression instead. For more information, - // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html) - // in the Amazon DynamoDB Developer Guide. + // see [ConditionalOperator]in the Amazon DynamoDB Developer Guide. + // + // [ConditionalOperator]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html ConditionalOperator types.ConditionalOperator // Determines the read consistency model: If set to true , then the operation uses // strongly consistent reads; otherwise, the operation uses eventually consistent - // reads. Strongly consistent reads are not supported on global secondary indexes. - // If you query a global secondary index with ConsistentRead set to true , you will + // reads. + // + // Strongly consistent reads are not supported on global secondary indexes. If you + // query a global secondary index with ConsistentRead set to true , you will // receive a ValidationException . ConsistentRead *bool // The primary key of the first item that this operation will evaluate. Use the - // value that was returned for LastEvaluatedKey in the previous operation. The - // data type for ExclusiveStartKey must be String, Number, or Binary. No set data - // types are allowed. + // value that was returned for LastEvaluatedKey in the previous operation. + // + // The data type for ExclusiveStartKey must be String, Number, or Binary. No set + // data types are allowed. ExclusiveStartKey map[string]types.AttributeValue // One or more substitution tokens for attribute names in an expression. The // following are some use cases for using ExpressionAttributeNames : + // // - To access an attribute whose name conflicts with a DynamoDB reserved word. + // // - To create a placeholder for repeating occurrences of an attribute name in // an expression. + // // - To prevent special characters in an attribute name from being // misinterpreted in an expression. + // // Use the # character in an expression to dereference an attribute name. For // example, consider the following attribute name: + // // - Percentile + // // The name of this attribute conflicts with a reserved word, so it cannot be used - // directly in an expression. (For the complete list of reserved words, see - // Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) - // in the Amazon DynamoDB Developer Guide). To work around this, you could specify - // the following for ExpressionAttributeNames : + // directly in an expression. 
(For the complete list of reserved words, see [Reserved Words]in the + // Amazon DynamoDB Developer Guide). To work around this, you could specify the + // following for ExpressionAttributeNames : + // // - {"#P":"Percentile"} + // // You could then use this substitution in an expression, as in this example: + // // - #P = :val + // // Tokens that begin with the : character are expression attribute values, which - // are placeholders for the actual value at runtime. For more information on - // expression attribute names, see Specifying Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. + // are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see [Specifying Item Attributes] in the Amazon DynamoDB + // Developer Guide. + // + // [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html + // [Specifying Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html ExpressionAttributeNames map[string]string - // One or more values that can be substituted in an expression. Use the : (colon) - // character in an expression to dereference an attribute value. For example, - // suppose that you wanted to check whether the value of the ProductStatus - // attribute was one of the following: Available | Backordered | Discontinued You - // would first need to specify ExpressionAttributeValues as follows: { - // ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, - // ":disc":{"S":"Discontinued"} } You could then use these values in an expression, - // such as this: ProductStatus IN (:avail, :back, :disc) For more information on - // expression attribute values, see Specifying Conditions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) - // in the Amazon DynamoDB Developer Guide. + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute value. + // For example, suppose that you wanted to check whether the value of the + // ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, + // ":disc":{"S":"Discontinued"} } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see [Specifying Conditions] in the Amazon + // DynamoDB Developer Guide. + // + // [Specifying Conditions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html ExpressionAttributeValues map[string]types.AttributeValue // A string that contains conditions that DynamoDB applies after the Query // operation, but before the data is returned to you. Items that do not satisfy the - // FilterExpression criteria are not returned. A FilterExpression does not allow - // key attributes. You cannot define a filter expression based on a partition key - // or a sort key. A FilterExpression is applied after the items have already been - // read; the process of filtering does not consume any additional read capacity - // units. 
For more information, see Filter Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.FilterExpression.html) - // in the Amazon DynamoDB Developer Guide. + // FilterExpression criteria are not returned. + // + // A FilterExpression does not allow key attributes. You cannot define a filter + // expression based on a partition key or a sort key. + // + // A FilterExpression is applied after the items have already been read; the + // process of filtering does not consume any additional read capacity units. + // + // For more information, see [Filter Expressions] in the Amazon DynamoDB Developer Guide. + // + // [Filter Expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.FilterExpression.html FilterExpression *string // The name of an index to query. This index can be any local secondary index or @@ -148,16 +195,26 @@ type QueryInput struct { IndexName *string // The condition that specifies the key values for items to be retrieved by the - // Query action. The condition must perform an equality test on a single partition - // key value. The condition can optionally perform one of several comparison tests - // on a single sort key value. This allows Query to retrieve one item with a given + // Query action. + // + // The condition must perform an equality test on a single partition key value. + // + // The condition can optionally perform one of several comparison tests on a + // single sort key value. This allows Query to retrieve one item with a given // partition key value and sort key value, or several items that have the same - // partition key value but different sort key values. The partition key equality - // test is required, and must be specified in the following format: - // partitionKeyName = :partitionkeyval If you also want to provide a condition for - // the sort key, it must be combined using AND with the condition for the sort - // key. Following is an example, using the = comparison operator for the sort key: - // partitionKeyName + // partition key value but different sort key values. + // + // The partition key equality test is required, and must be specified in the + // following format: + // + // partitionKeyName = :partitionkeyval + // + // If you also want to provide a condition for the sort key, it must be combined + // using AND with the condition for the sort key. Following is an example, using + // the = comparison operator for the sort key: + // + // partitionKeyName + // // = // // :partitionkeyval @@ -167,45 +224,64 @@ type QueryInput struct { // sortKeyName // // = - // :sortkeyval Valid comparisons for the sort key condition are as follows: + // + // :sortkeyval + // + // Valid comparisons for the sort key condition are as follows: + // // - sortKeyName = :sortkeyval - true if the sort key value is equal to // :sortkeyval . + // // - sortKeyName < :sortkeyval - true if the sort key value is less than // :sortkeyval . + // // - sortKeyName <= :sortkeyval - true if the sort key value is less than or // equal to :sortkeyval . + // // - sortKeyName > :sortkeyval - true if the sort key value is greater than // :sortkeyval . + // // - sortKeyName >= :sortkeyval - true if the sort key value is greater than or // equal to :sortkeyval . + // // - sortKeyName BETWEEN :sortkeyval1 AND :sortkeyval2 - true if the sort key // value is greater than or equal to :sortkeyval1 , and less than or equal to // :sortkeyval2 . 
+ // // - begins_with ( sortKeyName , :sortkeyval ) - true if the sort key value // begins with a particular operand. (You cannot use this function with a sort key // that is of type Number.) Note that the function name begins_with is // case-sensitive. + // // Use the ExpressionAttributeValues parameter to replace tokens such as - // :partitionval and :sortval with actual values at runtime. You can optionally - // use the ExpressionAttributeNames parameter to replace the names of the - // partition key and sort key with placeholder tokens. This option might be - // necessary if an attribute name conflicts with a DynamoDB reserved word. For - // example, the following KeyConditionExpression parameter causes an error because - // Size is a reserved word: + // :partitionval and :sortval with actual values at runtime. + // + // You can optionally use the ExpressionAttributeNames parameter to replace the + // names of the partition key and sort key with placeholder tokens. This option + // might be necessary if an attribute name conflicts with a DynamoDB reserved word. + // For example, the following KeyConditionExpression parameter causes an error + // because Size is a reserved word: + // // - Size = :myval + // // To work around this, define a placeholder (such a #S ) to represent the // attribute name Size. KeyConditionExpression then is as follows: + // // - #S = :myval - // For a list of reserved words, see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) - // in the Amazon DynamoDB Developer Guide. For more information on - // ExpressionAttributeNames and ExpressionAttributeValues , see Using Placeholders - // for Attribute Names and Values (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ExpressionPlaceholders.html) - // in the Amazon DynamoDB Developer Guide. + // + // For a list of reserved words, see [Reserved Words] in the Amazon DynamoDB Developer Guide. + // + // For more information on ExpressionAttributeNames and ExpressionAttributeValues , + // see [Using Placeholders for Attribute Names and Values]in the Amazon DynamoDB Developer Guide. + // + // [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html + // [Using Placeholders for Attribute Names and Values]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ExpressionPlaceholders.html KeyConditionExpression *string // This is a legacy parameter. Use KeyConditionExpression instead. For more - // information, see KeyConditions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.KeyConditions.html) - // in the Amazon DynamoDB Developer Guide. + // information, see [KeyConditions]in the Amazon DynamoDB Developer Guide. + // + // [KeyConditions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.KeyConditions.html KeyConditions map[string]types.Condition // The maximum number of items to evaluate (not necessarily the number of matching @@ -216,82 +292,104 @@ type QueryInput struct { // dataset size exceeds 1 MB before DynamoDB reaches this limit, it stops the // operation and returns the matching values up to the limit, and a key in // LastEvaluatedKey to apply in a subsequent operation to continue the operation. - // For more information, see Query and Scan (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html) - // in the Amazon DynamoDB Developer Guide. 
+ // For more information, see [Query and Scan]in the Amazon DynamoDB Developer Guide. + // + // [Query and Scan]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html Limit *int32 // A string that identifies one or more attributes to retrieve from the table. // These attributes can include scalars, sets, or elements of a JSON document. The - // attributes in the expression must be separated by commas. If no attribute names - // are specified, then all attributes will be returned. If any of the requested - // attributes are not found, they will not appear in the result. For more - // information, see Accessing Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. + // attributes in the expression must be separated by commas. + // + // If no attribute names are specified, then all attributes will be returned. If + // any of the requested attributes are not found, they will not appear in the + // result. + // + // For more information, see [Accessing Item Attributes] in the Amazon DynamoDB Developer Guide. + // + // [Accessing Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html ProjectionExpression *string // This is a legacy parameter. Use FilterExpression instead. For more information, - // see QueryFilter (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.QueryFilter.html) - // in the Amazon DynamoDB Developer Guide. + // see [QueryFilter]in the Amazon DynamoDB Developer Guide. + // + // [QueryFilter]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.QueryFilter.html QueryFilter map[string]types.Condition // Determines the level of detail about either provisioned or on-demand throughput // consumption that is returned in the response: + // // - INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary index - // that was accessed. Note that some operations, such as GetItem and BatchGetItem - // , do not access any indexes at all. In these cases, specifying INDEXES will - // only return ConsumedCapacity information for table(s). + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem , do not access any + // indexes at all. In these cases, specifying INDEXES will only return + // ConsumedCapacity information for table(s). + // // - TOTAL - The response includes only the aggregate ConsumedCapacity for the // operation. + // // - NONE - No ConsumedCapacity details are included in the response. ReturnConsumedCapacity types.ReturnConsumedCapacity // Specifies the order for index traversal: If true (default), the traversal is // performed in ascending order; if false , the traversal is performed in - // descending order. Items with the same partition key value are stored in sorted - // order by sort key. If the sort key data type is Number, the results are stored - // in numeric order. For type String, the results are stored in order of UTF-8 - // bytes. For type Binary, DynamoDB treats each byte of the binary data as - // unsigned. If ScanIndexForward is true , DynamoDB returns the results in the - // order in which they are stored (by sort key value). This is the default - // behavior. 
If ScanIndexForward is false , DynamoDB reads the results in reverse - // order by sort key value, and then returns the results to the client. + // descending order. + // + // Items with the same partition key value are stored in sorted order by sort key. + // If the sort key data type is Number, the results are stored in numeric order. + // For type String, the results are stored in order of UTF-8 bytes. For type + // Binary, DynamoDB treats each byte of the binary data as unsigned. + // + // If ScanIndexForward is true , DynamoDB returns the results in the order in which + // they are stored (by sort key value). This is the default behavior. If + // ScanIndexForward is false , DynamoDB reads the results in reverse order by sort + // key value, and then returns the results to the client. ScanIndexForward *bool // The attributes to be returned in the result. You can retrieve all item // attributes, specific item attributes, the count of matching items, or in the // case of an index, some or all of the attributes projected into the index. + // // - ALL_ATTRIBUTES - Returns all of the item attributes from the specified table // or index. If you query a local secondary index, then for each matching item in // the index, DynamoDB fetches the entire item from the parent table. If the index // is configured to project all item attributes, then all of the data can be // obtained from the local secondary index, and no fetching is required. + // // - ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves // all attributes that have been projected into the index. If the index is // configured to project all attributes, this return value is equivalent to // specifying ALL_ATTRIBUTES . + // // - COUNT - Returns the number of matching items, rather than the matching items // themselves. Note that this uses the same quantity of read capacity units as // getting the items, and is subject to the same item size calculations. + // // - SPECIFIC_ATTRIBUTES - Returns only the attributes listed in // ProjectionExpression . This return value is equivalent to specifying - // ProjectionExpression without specifying any value for Select . If you query or - // scan a local secondary index and request only attributes that are projected into - // that index, the operation will read only the index and not the table. If any of - // the requested attributes are not projected into the local secondary index, - // DynamoDB fetches each of these attributes from the parent table. This extra - // fetching incurs additional throughput cost and latency. If you query or scan a - // global secondary index, you can only request attributes that are projected into - // the index. Global secondary index queries cannot fetch attributes from the - // parent table. + // ProjectionExpression without specifying any value for Select . + // + // If you query or scan a local secondary index and request only attributes that + // are projected into that index, the operation will read only the index and not + // the table. If any of the requested attributes are not projected into the local + // secondary index, DynamoDB fetches each of these attributes from the parent + // table. This extra fetching incurs additional throughput cost and latency. + // + // If you query or scan a global secondary index, you can only request attributes + // that are projected into the index. Global secondary index queries cannot fetch + // attributes from the parent table. 
+ // // If neither Select nor ProjectionExpression are specified, DynamoDB defaults to // ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when // accessing an index. You cannot use both Select and ProjectionExpression // together in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES // . (This usage is equivalent to specifying ProjectionExpression without any - // value for Select .) If you use the ProjectionExpression parameter, then the - // value for Select can only be SPECIFIC_ATTRIBUTES . Any other value for Select - // will return an error. + // value for Select .) + // + // If you use the ProjectionExpression parameter, then the value for Select can + // only be SPECIFIC_ATTRIBUTES . Any other value for Select will return an error. Select types.Select noSmithyDocumentSerde @@ -303,15 +401,19 @@ type QueryOutput struct { // The capacity units consumed by the Query operation. The data returned includes // the total provisioned throughput consumed, along with statistics for the table // and any indexes involved in the operation. ConsumedCapacity is only returned if - // the ReturnConsumedCapacity parameter was specified. For more information, see - // Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // the ReturnConsumedCapacity parameter was specified. For more information, see [Capacity unit consumption for read operations] // in the Amazon DynamoDB Developer Guide. + // + // [Capacity unit consumption for read operations]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#read-operation-consumption ConsumedCapacity *types.ConsumedCapacity - // The number of items in the response. If you used a QueryFilter in the request, - // then Count is the number of items returned after the filter was applied, and - // ScannedCount is the number of matching items before the filter was applied. If - // you did not use a filter in the request, then Count and ScannedCount are the + // The number of items in the response. + // + // If you used a QueryFilter in the request, then Count is the number of items + // returned after the filter was applied, and ScannedCount is the number of + // matching items before the filter was applied. + // + // If you did not use a filter in the request, then Count and ScannedCount are the // same. Count int32 @@ -321,18 +423,24 @@ type QueryOutput struct { // The primary key of the item where the operation stopped, inclusive of the // previous result set. Use this value to start a new operation, excluding this - // value in the new request. If LastEvaluatedKey is empty, then the "last page" of - // results has been processed and there is no more data to be retrieved. If - // LastEvaluatedKey is not empty, it does not necessarily mean that there is more - // data in the result set. The only way to know when you have reached the end of - // the result set is when LastEvaluatedKey is empty. + // value in the new request. + // + // If LastEvaluatedKey is empty, then the "last page" of results has been + // processed and there is no more data to be retrieved. + // + // If LastEvaluatedKey is not empty, it does not necessarily mean that there is + // more data in the result set. The only way to know when you have reached the end + // of the result set is when LastEvaluatedKey is empty. LastEvaluatedKey map[string]types.AttributeValue // The number of items evaluated, before any QueryFilter is applied. 
A high // ScannedCount value with few, or no, Count results indicates an inefficient Query - // operation. For more information, see Count and ScannedCount (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Count) - // in the Amazon DynamoDB Developer Guide. If you did not use a filter in the - // request, then ScannedCount is the same as Count . + // operation. For more information, see [Count and ScannedCount]in the Amazon DynamoDB Developer Guide. + // + // If you did not use a filter in the request, then ScannedCount is the same as + // Count . + // + // [Count and ScannedCount]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.Count ScannedCount int32 // Metadata pertaining to the operation's result. @@ -399,6 +507,12 @@ func (c *Client) addOperationQueryMiddlewares(stack *middleware.Stack, options O if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpQueryValidationMiddleware(stack); err != nil { return err } @@ -429,56 +543,6 @@ func (c *Client) addOperationQueryMiddlewares(stack *middleware.Stack, options O return nil } -func addOpQueryDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { - return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ - Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ - func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { - opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS - opt.Logger = o.Logger - }, - }, - DiscoverOperation: c.fetchOpQueryDiscoverEndpoint, - EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, - EndpointDiscoveryRequired: false, - Region: o.Region, - }, "ResolveEndpointV2", middleware.After) -} - -func (c *Client) fetchOpQueryDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { - input := getOperationInput(ctx) - in, ok := input.(*QueryInput) - if !ok { - return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) - } - _ = in - - identifierMap := make(map[string]string, 0) - identifierMap["sdk#Region"] = region - - key := fmt.Sprintf("DynamoDB.%v", identifierMap) - - if v, ok := c.endpointCache.Get(key); ok { - return v, nil - } - - discoveryOperationInput := &DescribeEndpointsInput{} - - opt := internalEndpointDiscovery.DiscoverEndpointOptions{} - for _, fn := range optFns { - fn(&opt) - } - - go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) - return internalEndpointDiscovery.WeightedAddress{}, nil -} - -// QueryAPIClient is a client that implements the Query operation. -type QueryAPIClient interface { - Query(context.Context, *QueryInput, ...func(*Options)) (*QueryOutput, error) -} - -var _ QueryAPIClient = (*Client)(nil) - // QueryPaginatorOptions is the paginator options for Query type QueryPaginatorOptions struct { // The maximum number of items to evaluate (not necessarily the number of matching @@ -489,8 +553,9 @@ type QueryPaginatorOptions struct { // dataset size exceeds 1 MB before DynamoDB reaches this limit, it stops the // operation and returns the matching values up to the limit, and a key in // LastEvaluatedKey to apply in a subsequent operation to continue the operation. 
- // For more information, see Query and Scan (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html) - // in the Amazon DynamoDB Developer Guide. + // For more information, see [Query and Scan]in the Amazon DynamoDB Developer Guide. + // + // [Query and Scan]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html Limit int32 } @@ -547,6 +612,9 @@ func (p *QueryPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) } params.Limit = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.Query(ctx, ¶ms, optFns...) if err != nil { return nil, err @@ -561,6 +629,56 @@ func (p *QueryPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) return result, nil } +func addOpQueryDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpQueryDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpQueryDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*QueryInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +// QueryAPIClient is a client that implements the Query operation. +type QueryAPIClient interface { + Query(context.Context, *QueryInput, ...func(*Options)) (*QueryOutput, error) +} + +var _ QueryAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opQuery(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_RestoreTableFromBackup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_RestoreTableFromBackup.go index 0bba0c74de6..ea13a605784 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_RestoreTableFromBackup.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_RestoreTableFromBackup.go @@ -13,14 +13,22 @@ import ( ) // Creates a new table from an existing backup. Any number of users can execute up -// to 50 concurrent restores (any type of restore) in a given account. You can call -// RestoreTableFromBackup at a maximum rate of 10 times per second. You must -// manually set up the following on the restored table: +// to 50 concurrent restores (any type of restore) in a given account. 
+// +// You can call RestoreTableFromBackup at a maximum rate of 10 times per second. +// +// You must manually set up the following on the restored table: +// // - Auto scaling policies +// // - IAM policies +// // - Amazon CloudWatch metrics and alarms +// // - Tags +// // - Stream settings +// // - Time to Live (TTL) settings func (c *Client) RestoreTableFromBackup(ctx context.Context, params *RestoreTableFromBackupInput, optFns ...func(*Options)) (*RestoreTableFromBackupOutput, error) { if params == nil { @@ -62,6 +70,11 @@ type RestoreTableFromBackupInput struct { // of the indexes at the time of restore. LocalSecondaryIndexOverride []types.LocalSecondaryIndex + // Sets the maximum number of read and write units for the specified on-demand + // table. If you use this parameter, you must specify MaxReadRequestUnits , + // MaxWriteRequestUnits , or both. + OnDemandThroughputOverride *types.OnDemandThroughput + // Provisioned throughput settings for the restored table. ProvisionedThroughputOverride *types.ProvisionedThroughput @@ -140,6 +153,12 @@ func (c *Client) addOperationRestoreTableFromBackupMiddlewares(stack *middleware if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpRestoreTableFromBackupValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_RestoreTableToPointInTime.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_RestoreTableToPointInTime.go index 9049d3d3a14..aab521b0731 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_RestoreTableToPointInTime.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_RestoreTableToPointInTime.go @@ -17,23 +17,40 @@ import ( // EarliestRestorableDateTime and LatestRestorableDateTime . You can restore your // table to any point in time during the last 35 days. Any number of users can // execute up to 50 concurrent restores (any type of restore) in a given account. -// When you restore using point in time recovery, DynamoDB restores your table data -// to the state based on the selected date and time (day:hour:minute:second) to a -// new table. Along with data, the following are also included on the new restored -// table using point in time recovery: +// +// When you restore using point in time recovery, DynamoDB restores your table +// data to the state based on the selected date and time (day:hour:minute:second) +// to a new table. +// +// Along with data, the following are also included on the new restored table +// using point in time recovery: +// // - Global secondary indexes (GSIs) +// // - Local secondary indexes (LSIs) +// // - Provisioned read and write capacity -// - Encryption settings All these settings come from the current settings of -// the source table at the time of restore. +// +// - Encryption settings +// +// All these settings come from the current settings of the source table at the +// +// time of restore. 
// // You must manually set up the following on the restored table: +// // - Auto scaling policies +// // - IAM policies +// // - Amazon CloudWatch metrics and alarms +// // - Tags +// // - Stream settings +// // - Time to Live (TTL) settings +// // - Point in time recovery settings func (c *Client) RestoreTableToPointInTime(ctx context.Context, params *RestoreTableToPointInTimeInput, optFns ...func(*Options)) (*RestoreTableToPointInTimeOutput, error) { if params == nil { @@ -70,6 +87,11 @@ type RestoreTableToPointInTimeInput struct { // of the indexes at the time of restore. LocalSecondaryIndexOverride []types.LocalSecondaryIndex + // Sets the maximum number of read and write units for the specified on-demand + // table. If you use this parameter, you must specify MaxReadRequestUnits , + // MaxWriteRequestUnits , or both. + OnDemandThroughputOverride *types.OnDemandThroughput + // Provisioned throughput settings for the restored table. ProvisionedThroughputOverride *types.ProvisionedThroughput @@ -162,6 +184,12 @@ func (c *Client) addOperationRestoreTableToPointInTimeMiddlewares(stack *middlew if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpRestoreTableToPointInTimeValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Scan.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Scan.go index 4a5387288e1..10e174a77a0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Scan.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Scan.go @@ -14,37 +14,48 @@ import ( // The Scan operation returns one or more items and item attributes by accessing // every item in a table or a secondary index. To have DynamoDB return fewer items, -// you can provide a FilterExpression operation. If the total size of scanned -// items exceeds the maximum dataset size limit of 1 MB, the scan completes and -// results are returned to the user. The LastEvaluatedKey value is also returned -// and the requestor can use the LastEvaluatedKey to continue the scan in a -// subsequent operation. Each scan response also includes number of items that were -// scanned (ScannedCount) as part of the request. If using a FilterExpression , a -// scan result can result in no items meeting the criteria and the Count will -// result in zero. If you did not use a FilterExpression in the scan request, then -// Count is the same as ScannedCount . Count and ScannedCount only return the -// count of items specific to a single scan request and, unless the table is less -// than 1MB, do not represent the total number of items in the table. A single Scan -// operation first reads up to the maximum number of items set (if using the Limit -// parameter) or a maximum of 1 MB of data and then applies any filtering to the -// results if a FilterExpression is provided. If LastEvaluatedKey is present in -// the response, pagination is required to complete the full table scan. For more -// information, see Paginating the Results (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.Pagination) -// in the Amazon DynamoDB Developer Guide. 
Scan operations proceed sequentially; -// however, for faster performance on a large table or secondary index, -// applications can request a parallel Scan operation by providing the Segment and -// TotalSegments parameters. For more information, see Parallel Scan (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.ParallelScan) -// in the Amazon DynamoDB Developer Guide. By default, a Scan uses eventually -// consistent reads when accessing the items in a table. Therefore, the results -// from an eventually consistent Scan may not include the latest item changes at -// the time the scan iterates through each item in the table. If you require a -// strongly consistent read of each item as the scan iterates through the items in -// the table, you can set the ConsistentRead parameter to true. Strong consistency -// only relates to the consistency of the read at the item level. DynamoDB does not -// provide snapshot isolation for a scan operation when the ConsistentRead -// parameter is set to true. Thus, a DynamoDB scan operation does not guarantee -// that all reads in a scan see a consistent snapshot of the table when the scan -// operation was requested. +// you can provide a FilterExpression operation. +// +// If the total size of scanned items exceeds the maximum dataset size limit of 1 +// MB, the scan completes and results are returned to the user. The +// LastEvaluatedKey value is also returned and the requestor can use the +// LastEvaluatedKey to continue the scan in a subsequent operation. Each scan +// response also includes number of items that were scanned (ScannedCount) as part +// of the request. If using a FilterExpression , a scan result can result in no +// items meeting the criteria and the Count will result in zero. If you did not +// use a FilterExpression in the scan request, then Count is the same as +// ScannedCount . +// +// Count and ScannedCount only return the count of items specific to a single scan +// request and, unless the table is less than 1MB, do not represent the total +// number of items in the table. +// +// A single Scan operation first reads up to the maximum number of items set (if +// using the Limit parameter) or a maximum of 1 MB of data and then applies any +// filtering to the results if a FilterExpression is provided. If LastEvaluatedKey +// is present in the response, pagination is required to complete the full table +// scan. For more information, see [Paginating the Results]in the Amazon DynamoDB Developer Guide. +// +// Scan operations proceed sequentially; however, for faster performance on a +// large table or secondary index, applications can request a parallel Scan +// operation by providing the Segment and TotalSegments parameters. For more +// information, see [Parallel Scan]in the Amazon DynamoDB Developer Guide. +// +// By default, a Scan uses eventually consistent reads when accessing the items in +// a table. Therefore, the results from an eventually consistent Scan may not +// include the latest item changes at the time the scan iterates through each item +// in the table. If you require a strongly consistent read of each item as the scan +// iterates through the items in the table, you can set the ConsistentRead +// parameter to true. Strong consistency only relates to the consistency of the +// read at the item level. +// +// DynamoDB does not provide snapshot isolation for a scan operation when the +// ConsistentRead parameter is set to true. 
Thus, a DynamoDB scan operation does +// not guarantee that all reads in a scan see a consistent snapshot of the table +// when the scan operation was requested. +// +// [Paginating the Results]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.Pagination +// [Parallel Scan]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.ParallelScan func (c *Client) Scan(ctx context.Context, params *ScanInput, optFns ...func(*Options)) (*ScanOutput, error) { if params == nil { params = &ScanInput{} @@ -64,83 +75,123 @@ func (c *Client) Scan(ctx context.Context, params *ScanInput, optFns ...func(*Op type ScanInput struct { // The name of the table containing the requested items or if you provide IndexName - // , the name of the table to which that index belongs. You can also provide the - // Amazon Resource Name (ARN) of the table in this parameter. + // , the name of the table to which that index belongs. + // + // You can also provide the Amazon Resource Name (ARN) of the table in this + // parameter. // // This member is required. TableName *string // This is a legacy parameter. Use ProjectionExpression instead. For more - // information, see AttributesToGet (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html) - // in the Amazon DynamoDB Developer Guide. + // information, see [AttributesToGet]in the Amazon DynamoDB Developer Guide. + // + // [AttributesToGet]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html AttributesToGet []string // This is a legacy parameter. Use FilterExpression instead. For more information, - // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html) - // in the Amazon DynamoDB Developer Guide. + // see [ConditionalOperator]in the Amazon DynamoDB Developer Guide. + // + // [ConditionalOperator]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html ConditionalOperator types.ConditionalOperator // A Boolean value that determines the read consistency model during the scan: + // // - If ConsistentRead is false , then the data returned from Scan might not // contain the results from other recently completed write operations ( PutItem , // UpdateItem , or DeleteItem ). + // // - If ConsistentRead is true , then all of the write operations that completed // before the Scan began are guaranteed to be contained in the Scan response. - // The default setting for ConsistentRead is false . The ConsistentRead parameter - // is not supported on global secondary indexes. If you scan a global secondary - // index with ConsistentRead set to true, you will receive a ValidationException . + // + // The default setting for ConsistentRead is false . + // + // The ConsistentRead parameter is not supported on global secondary indexes. If + // you scan a global secondary index with ConsistentRead set to true, you will + // receive a ValidationException . ConsistentRead *bool // The primary key of the first item that this operation will evaluate. Use the - // value that was returned for LastEvaluatedKey in the previous operation. The - // data type for ExclusiveStartKey must be String, Number or Binary. No set data - // types are allowed. 
In a parallel scan, a Scan request that includes - // ExclusiveStartKey must specify the same segment whose previous Scan returned - // the corresponding value of LastEvaluatedKey . + // value that was returned for LastEvaluatedKey in the previous operation. + // + // The data type for ExclusiveStartKey must be String, Number or Binary. No set + // data types are allowed. + // + // In a parallel scan, a Scan request that includes ExclusiveStartKey must specify + // the same segment whose previous Scan returned the corresponding value of + // LastEvaluatedKey . ExclusiveStartKey map[string]types.AttributeValue // One or more substitution tokens for attribute names in an expression. The // following are some use cases for using ExpressionAttributeNames : + // // - To access an attribute whose name conflicts with a DynamoDB reserved word. + // // - To create a placeholder for repeating occurrences of an attribute name in // an expression. + // // - To prevent special characters in an attribute name from being // misinterpreted in an expression. + // // Use the # character in an expression to dereference an attribute name. For // example, consider the following attribute name: + // // - Percentile + // // The name of this attribute conflicts with a reserved word, so it cannot be used - // directly in an expression. (For the complete list of reserved words, see - // Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) - // in the Amazon DynamoDB Developer Guide). To work around this, you could specify - // the following for ExpressionAttributeNames : + // directly in an expression. (For the complete list of reserved words, see [Reserved Words]in the + // Amazon DynamoDB Developer Guide). To work around this, you could specify the + // following for ExpressionAttributeNames : + // // - {"#P":"Percentile"} + // // You could then use this substitution in an expression, as in this example: + // // - #P = :val + // // Tokens that begin with the : character are expression attribute values, which - // are placeholders for the actual value at runtime. For more information on - // expression attribute names, see Specifying Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. + // are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see [Specifying Item Attributes] in the Amazon DynamoDB + // Developer Guide. + // + // [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html + // [Specifying Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html ExpressionAttributeNames map[string]string - // One or more values that can be substituted in an expression. Use the : (colon) - // character in an expression to dereference an attribute value. 
For example, - // suppose that you wanted to check whether the value of the ProductStatus - // attribute was one of the following: Available | Backordered | Discontinued You - // would first need to specify ExpressionAttributeValues as follows: { - // ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, - // ":disc":{"S":"Discontinued"} } You could then use these values in an expression, - // such as this: ProductStatus IN (:avail, :back, :disc) For more information on - // expression attribute values, see Condition Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) - // in the Amazon DynamoDB Developer Guide. + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute value. + // For example, suppose that you wanted to check whether the value of the + // ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, + // ":disc":{"S":"Discontinued"} } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see [Condition Expressions] in the Amazon + // DynamoDB Developer Guide. + // + // [Condition Expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html ExpressionAttributeValues map[string]types.AttributeValue // A string that contains conditions that DynamoDB applies after the Scan // operation, but before the data is returned to you. Items that do not satisfy the - // FilterExpression criteria are not returned. A FilterExpression is applied after - // the items have already been read; the process of filtering does not consume any - // additional read capacity units. For more information, see Filter Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.FilterExpression) - // in the Amazon DynamoDB Developer Guide. + // FilterExpression criteria are not returned. + // + // A FilterExpression is applied after the items have already been read; the + // process of filtering does not consume any additional read capacity units. + // + // For more information, see [Filter Expressions] in the Amazon DynamoDB Developer Guide. + // + // [Filter Expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.FilterExpression FilterExpression *string // The name of a secondary index to scan. This index can be any local secondary @@ -156,92 +207,120 @@ type ScanInput struct { // dataset size exceeds 1 MB before DynamoDB reaches this limit, it stops the // operation and returns the matching values up to the limit, and a key in // LastEvaluatedKey to apply in a subsequent operation to continue the operation. - // For more information, see Working with Queries (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html) - // in the Amazon DynamoDB Developer Guide. + // For more information, see [Working with Queries]in the Amazon DynamoDB Developer Guide. + // + // [Working with Queries]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html Limit *int32 // A string that identifies one or more attributes to retrieve from the specified // table or index. 
These attributes can include scalars, sets, or elements of a - // JSON document. The attributes in the expression must be separated by commas. If - // no attribute names are specified, then all attributes will be returned. If any - // of the requested attributes are not found, they will not appear in the result. - // For more information, see Specifying Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. + // JSON document. The attributes in the expression must be separated by commas. + // + // If no attribute names are specified, then all attributes will be returned. If + // any of the requested attributes are not found, they will not appear in the + // result. + // + // For more information, see [Specifying Item Attributes] in the Amazon DynamoDB Developer Guide. + // + // [Specifying Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html ProjectionExpression *string // Determines the level of detail about either provisioned or on-demand throughput // consumption that is returned in the response: + // // - INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary index - // that was accessed. Note that some operations, such as GetItem and BatchGetItem - // , do not access any indexes at all. In these cases, specifying INDEXES will - // only return ConsumedCapacity information for table(s). + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem , do not access any + // indexes at all. In these cases, specifying INDEXES will only return + // ConsumedCapacity information for table(s). + // // - TOTAL - The response includes only the aggregate ConsumedCapacity for the // operation. + // // - NONE - No ConsumedCapacity details are included in the response. ReturnConsumedCapacity types.ReturnConsumedCapacity // This is a legacy parameter. Use FilterExpression instead. For more information, - // see ScanFilter (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ScanFilter.html) - // in the Amazon DynamoDB Developer Guide. + // see [ScanFilter]in the Amazon DynamoDB Developer Guide. + // + // [ScanFilter]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ScanFilter.html ScanFilter map[string]types.Condition // For a parallel Scan request, Segment identifies an individual segment to be - // scanned by an application worker. Segment IDs are zero-based, so the first - // segment is always 0. For example, if you want to use four application threads to - // scan a table or an index, then the first thread specifies a Segment value of 0, - // the second thread specifies 1, and so on. The value of LastEvaluatedKey - // returned from a parallel Scan request must be used as ExclusiveStartKey with - // the same segment ID in a subsequent Scan operation. The value for Segment must - // be greater than or equal to 0, and less than the value provided for - // TotalSegments . If you provide Segment , you must also provide TotalSegments . + // scanned by an application worker. + // + // Segment IDs are zero-based, so the first segment is always 0. 
For example, if + // you want to use four application threads to scan a table or an index, then the + // first thread specifies a Segment value of 0, the second thread specifies 1, and + // so on. + // + // The value of LastEvaluatedKey returned from a parallel Scan request must be + // used as ExclusiveStartKey with the same segment ID in a subsequent Scan + // operation. + // + // The value for Segment must be greater than or equal to 0, and less than the + // value provided for TotalSegments . + // + // If you provide Segment , you must also provide TotalSegments . Segment *int32 // The attributes to be returned in the result. You can retrieve all item // attributes, specific item attributes, the count of matching items, or in the // case of an index, some or all of the attributes projected into the index. + // // - ALL_ATTRIBUTES - Returns all of the item attributes from the specified table // or index. If you query a local secondary index, then for each matching item in // the index, DynamoDB fetches the entire item from the parent table. If the index // is configured to project all item attributes, then all of the data can be // obtained from the local secondary index, and no fetching is required. + // // - ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves // all attributes that have been projected into the index. If the index is // configured to project all attributes, this return value is equivalent to // specifying ALL_ATTRIBUTES . + // // - COUNT - Returns the number of matching items, rather than the matching items // themselves. Note that this uses the same quantity of read capacity units as // getting the items, and is subject to the same item size calculations. + // // - SPECIFIC_ATTRIBUTES - Returns only the attributes listed in // ProjectionExpression . This return value is equivalent to specifying - // ProjectionExpression without specifying any value for Select . If you query or - // scan a local secondary index and request only attributes that are projected into - // that index, the operation reads only the index and not the table. If any of the - // requested attributes are not projected into the local secondary index, DynamoDB - // fetches each of these attributes from the parent table. This extra fetching - // incurs additional throughput cost and latency. If you query or scan a global - // secondary index, you can only request attributes that are projected into the - // index. Global secondary index queries cannot fetch attributes from the parent - // table. + // ProjectionExpression without specifying any value for Select . + // + // If you query or scan a local secondary index and request only attributes that + // are projected into that index, the operation reads only the index and not the + // table. If any of the requested attributes are not projected into the local + // secondary index, DynamoDB fetches each of these attributes from the parent + // table. This extra fetching incurs additional throughput cost and latency. + // + // If you query or scan a global secondary index, you can only request attributes + // that are projected into the index. Global secondary index queries cannot fetch + // attributes from the parent table. + // // If neither Select nor ProjectionExpression are specified, DynamoDB defaults to // ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when // accessing an index. 
You cannot use both Select and ProjectionExpression // together in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES // . (This usage is equivalent to specifying ProjectionExpression without any - // value for Select .) If you use the ProjectionExpression parameter, then the - // value for Select can only be SPECIFIC_ATTRIBUTES . Any other value for Select - // will return an error. + // value for Select .) + // + // If you use the ProjectionExpression parameter, then the value for Select can + // only be SPECIFIC_ATTRIBUTES . Any other value for Select will return an error. Select types.Select // For a parallel Scan request, TotalSegments represents the total number of // segments into which the Scan operation will be divided. The value of // TotalSegments corresponds to the number of application workers that will perform // the parallel scan. For example, if you want to use four application threads to - // scan a table or an index, specify a TotalSegments value of 4. The value for - // TotalSegments must be greater than or equal to 1, and less than or equal to - // 1000000. If you specify a TotalSegments value of 1, the Scan operation will be - // sequential rather than parallel. If you specify TotalSegments , you must also - // specify Segment . + // scan a table or an index, specify a TotalSegments value of 4. + // + // The value for TotalSegments must be greater than or equal to 1, and less than + // or equal to 1000000. If you specify a TotalSegments value of 1, the Scan + // operation will be sequential rather than parallel. + // + // If you specify TotalSegments , you must also specify Segment . TotalSegments *int32 noSmithyDocumentSerde @@ -253,15 +332,20 @@ type ScanOutput struct { // The capacity units consumed by the Scan operation. The data returned includes // the total provisioned throughput consumed, along with statistics for the table // and any indexes involved in the operation. ConsumedCapacity is only returned if - // the ReturnConsumedCapacity parameter was specified. For more information, see - // Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html#ItemSizeCalculations.Reads) + // the ReturnConsumedCapacity parameter was specified. For more information, see [Capacity unit consumption for read operations] // in the Amazon DynamoDB Developer Guide. + // + // [Capacity unit consumption for read operations]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#read-operation-consumption ConsumedCapacity *types.ConsumedCapacity - // The number of items in the response. If you set ScanFilter in the request, then - // Count is the number of items returned after the filter was applied, and - // ScannedCount is the number of matching items before the filter was applied. If - // you did not use a filter in the request, then Count is the same as ScannedCount . + // The number of items in the response. + // + // If you set ScanFilter in the request, then Count is the number of items + // returned after the filter was applied, and ScannedCount is the number of + // matching items before the filter was applied. + // + // If you did not use a filter in the request, then Count is the same as + // ScannedCount . Count int32 // An array of item attributes that match the scan criteria. Each element in this @@ -270,18 +354,24 @@ type ScanOutput struct { // The primary key of the item where the operation stopped, inclusive of the // previous result set. 
Use this value to start a new operation, excluding this - // value in the new request. If LastEvaluatedKey is empty, then the "last page" of - // results has been processed and there is no more data to be retrieved. If - // LastEvaluatedKey is not empty, it does not necessarily mean that there is more - // data in the result set. The only way to know when you have reached the end of - // the result set is when LastEvaluatedKey is empty. + // value in the new request. + // + // If LastEvaluatedKey is empty, then the "last page" of results has been + // processed and there is no more data to be retrieved. + // + // If LastEvaluatedKey is not empty, it does not necessarily mean that there is + // more data in the result set. The only way to know when you have reached the end + // of the result set is when LastEvaluatedKey is empty. LastEvaluatedKey map[string]types.AttributeValue // The number of items evaluated, before any ScanFilter is applied. A high // ScannedCount value with few, or no, Count results indicates an inefficient Scan - // operation. For more information, see Count and ScannedCount (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Count) - // in the Amazon DynamoDB Developer Guide. If you did not use a filter in the - // request, then ScannedCount is the same as Count . + // operation. For more information, see [Count and ScannedCount]in the Amazon DynamoDB Developer Guide. + // + // If you did not use a filter in the request, then ScannedCount is the same as + // Count . + // + // [Count and ScannedCount]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Count ScannedCount int32 // Metadata pertaining to the operation's result. @@ -348,6 +438,12 @@ func (c *Client) addOperationScanMiddlewares(stack *middleware.Stack, options Op if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpScanValidationMiddleware(stack); err != nil { return err } @@ -378,56 +474,6 @@ func (c *Client) addOperationScanMiddlewares(stack *middleware.Stack, options Op return nil } -func addOpScanDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { - return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ - Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ - func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { - opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS - opt.Logger = o.Logger - }, - }, - DiscoverOperation: c.fetchOpScanDiscoverEndpoint, - EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, - EndpointDiscoveryRequired: false, - Region: o.Region, - }, "ResolveEndpointV2", middleware.After) -} - -func (c *Client) fetchOpScanDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { - input := getOperationInput(ctx) - in, ok := input.(*ScanInput) - if !ok { - return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) - } - _ = in - - identifierMap := make(map[string]string, 0) - identifierMap["sdk#Region"] = region - - key := fmt.Sprintf("DynamoDB.%v", identifierMap) - - if v, ok := c.endpointCache.Get(key); ok { - return v, nil - } - - discoveryOperationInput := &DescribeEndpointsInput{} - - 
opt := internalEndpointDiscovery.DiscoverEndpointOptions{} - for _, fn := range optFns { - fn(&opt) - } - - go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) - return internalEndpointDiscovery.WeightedAddress{}, nil -} - -// ScanAPIClient is a client that implements the Scan operation. -type ScanAPIClient interface { - Scan(context.Context, *ScanInput, ...func(*Options)) (*ScanOutput, error) -} - -var _ ScanAPIClient = (*Client)(nil) - // ScanPaginatorOptions is the paginator options for Scan type ScanPaginatorOptions struct { // The maximum number of items to evaluate (not necessarily the number of matching @@ -438,8 +484,9 @@ type ScanPaginatorOptions struct { // dataset size exceeds 1 MB before DynamoDB reaches this limit, it stops the // operation and returns the matching values up to the limit, and a key in // LastEvaluatedKey to apply in a subsequent operation to continue the operation. - // For more information, see Working with Queries (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html) - // in the Amazon DynamoDB Developer Guide. + // For more information, see [Working with Queries]in the Amazon DynamoDB Developer Guide. + // + // [Working with Queries]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html Limit int32 } @@ -496,6 +543,9 @@ func (p *ScanPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) } params.Limit = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.Scan(ctx, ¶ms, optFns...) if err != nil { return nil, err @@ -510,6 +560,56 @@ func (p *ScanPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) return result, nil } +func addOpScanDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpScanDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpScanDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*ScanInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +// ScanAPIClient is a client that implements the Scan operation. 
+type ScanAPIClient interface { + Scan(context.Context, *ScanInput, ...func(*Options)) (*ScanOutput, error) +} + +var _ ScanAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opScan(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TagResource.go index 9c953eba4ff..7d9d34c8c48 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TagResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TagResource.go @@ -15,9 +15,12 @@ import ( // Associate a set of tags with an Amazon DynamoDB resource. You can then activate // these user-defined tags so that they appear on the Billing and Cost Management // console for cost allocation tracking. You can call TagResource up to five times -// per second, per account. For an overview on tagging DynamoDB resources, see -// Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html) -// in the Amazon DynamoDB Developer Guide. +// per second, per account. +// +// For an overview on tagging DynamoDB resources, see [Tagging for DynamoDB] in the Amazon DynamoDB +// Developer Guide. +// +// [Tagging for DynamoDB]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html func (c *Client) TagResource(ctx context.Context, params *TagResourceInput, optFns ...func(*Options)) (*TagResourceOutput, error) { if params == nil { params = &TagResourceInput{} @@ -114,6 +117,12 @@ func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, opt if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpTagResourceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TransactGetItems.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TransactGetItems.go index 311145a5ee1..f991904d95b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TransactGetItems.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TransactGetItems.go @@ -18,12 +18,18 @@ import ( // each of which contains a Get structure that specifies an item to retrieve from // a table in the account and Region. A call to TransactGetItems cannot retrieve // items from tables in more than one Amazon Web Services account or Region. The -// aggregate size of the items in the transaction cannot exceed 4 MB. DynamoDB -// rejects the entire TransactGetItems request if any of the following is true: +// aggregate size of the items in the transaction cannot exceed 4 MB. +// +// DynamoDB rejects the entire TransactGetItems request if any of the following is +// true: +// // - A conflicting operation is in the process of updating an item to be read. +// // - There is insufficient provisioned capacity for the transaction to be // completed. +// // - There is a user error, such as an invalid data format. +// // - The aggregate size of the items in the transaction exceeded 4 MB. 
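
For orientation, a minimal sketch of what the rewritten TransactGetItems contract above looks like from calling code, assuming the aws-sdk-go-v2 DynamoDB client and hypothetical tables "orders" and "customers" with a string partition key "pk"; both Get actions must target the same account and Region, and the whole request is rejected under any of the conditions listed above.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := dynamodb.NewFromConfig(cfg)

	// Two Get actions against tables in the same account and Region, read as a
	// single transaction (at most 100 items, 4 MB aggregate size).
	out, err := client.TransactGetItems(context.TODO(), &dynamodb.TransactGetItemsInput{
		TransactItems: []types.TransactGetItem{
			{Get: &types.Get{
				TableName: aws.String("orders"), // hypothetical table and key names
				Key: map[string]types.AttributeValue{
					"pk": &types.AttributeValueMemberS{Value: "order#1001"},
				},
			}},
			{Get: &types.Get{
				TableName: aws.String("customers"),
				Key: map[string]types.AttributeValue{
					"pk": &types.AttributeValueMemberS{Value: "cust#7"},
				},
			}},
		},
	})
	if err != nil {
		// For example, a TransactionCanceledException when a conflicting update
		// is in progress or provisioned capacity is insufficient.
		log.Fatal(err)
	}

	// Responses is ordered like TransactItems; a requested item that could not
	// be retrieved comes back with a nil Item map rather than as an error.
	for i, r := range out.Responses {
		fmt.Printf("item %d: %v\n", i, r.Item)
	}
}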
func (c *Client) TransactGetItems(ctx context.Context, params *TransactGetItemsInput, optFns ...func(*Options)) (*TransactGetItemsOutput, error) { if params == nil { @@ -67,10 +73,11 @@ type TransactGetItemsOutput struct { // An ordered array of up to 100 ItemResponse objects, each of which corresponds // to the TransactGetItem object in the same position in the TransactItems array. // Each ItemResponse object contains a Map of the name-value pairs that are the - // projected attributes of the requested item. If a requested item could not be - // retrieved, the corresponding ItemResponse object is Null, or if the requested - // item has no projected attributes, the corresponding ItemResponse object is an - // empty Map. + // projected attributes of the requested item. + // + // If a requested item could not be retrieved, the corresponding ItemResponse + // object is Null, or if the requested item has no projected attributes, the + // corresponding ItemResponse object is an empty Map. Responses []types.ItemResponse // Metadata pertaining to the operation's result. @@ -137,6 +144,12 @@ func (c *Client) addOperationTransactGetItemsMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpTransactGetItemsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TransactWriteItems.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TransactWriteItems.go index 0582d6f080e..3d47daf86ba 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TransactWriteItems.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TransactWriteItems.go @@ -16,25 +16,30 @@ import ( // action requests. These actions can target items in different tables, but not in // different Amazon Web Services accounts or Regions, and no two actions can target // the same item. For example, you cannot both ConditionCheck and Update the same -// item. The aggregate size of the items in the transaction cannot exceed 4 MB. The -// actions are completed atomically so that either all of them succeed, or all of -// them fail. They are defined by the following objects: +// item. The aggregate size of the items in the transaction cannot exceed 4 MB. +// +// The actions are completed atomically so that either all of them succeed, or all +// of them fail. They are defined by the following objects: +// // - Put — Initiates a PutItem operation to write a new item. This structure // specifies the primary key of the item to be written, the name of the table to // write it in, an optional condition expression that must be satisfied for the // write to succeed, a list of the item's attributes, and a field indicating // whether to retrieve the item's attributes if the condition is not met. +// // - Update — Initiates an UpdateItem operation to update an existing item. This // structure specifies the primary key of the item to be updated, the name of the // table where it resides, an optional condition expression that must be satisfied // for the update to succeed, an expression that defines one or more attributes to // be updated, and a field indicating whether to retrieve the item's attributes if // the condition is not met. +// // - Delete — Initiates a DeleteItem operation to delete an existing item. 
This // structure specifies the primary key of the item to be deleted, the name of the // table where it resides, an optional condition expression that must be satisfied // for the deletion to succeed, and a field indicating whether to retrieve the // item's attributes if the condition is not met. +// // - ConditionCheck — Applies a condition to an item that is not being modified // by the transaction. This structure specifies the primary key of the item to be // checked, the name of the table where it resides, a condition expression that @@ -43,14 +48,20 @@ import ( // // DynamoDB rejects the entire TransactWriteItems request if any of the following // is true: +// // - A condition in one of the condition expressions is not met. +// // - An ongoing operation is in the process of updating the same item. +// // - There is insufficient provisioned capacity for the transaction to be // completed. +// // - An item size becomes too large (bigger than 400 KB), a local secondary // index (LSI) becomes too large, or a similar validation error occurs because of // changes made by the transaction. +// // - The aggregate size of the items in the transaction exceeds 4 MB. +// // - There is a user error, such as an invalid data format. func (c *Client) TransactWriteItems(ctx context.Context, params *TransactWriteItemsInput, optFns ...func(*Options)) (*TransactWriteItemsOutput, error) { if params == nil { @@ -79,30 +90,39 @@ type TransactWriteItemsInput struct { // Providing a ClientRequestToken makes the call to TransactWriteItems idempotent, // meaning that multiple identical calls have the same effect as one single call. + // // Although multiple identical calls using the same client request token produce // the same result on the server (no side effects), the responses to the calls // might not be the same. If the ReturnConsumedCapacity parameter is set, then the // initial TransactWriteItems call returns the amount of write capacity units // consumed in making the changes. Subsequent TransactWriteItems calls with the // same client token return the number of read capacity units consumed in reading - // the item. A client request token is valid for 10 minutes after the first request - // that uses it is completed. After 10 minutes, any request with the same client - // token is treated as a new request. Do not resubmit the same request with the - // same client token for more than 10 minutes, or the result might not be - // idempotent. If you submit a request with the same client token but a change in - // other parameters within the 10-minute idempotency window, DynamoDB returns an + // the item. + // + // A client request token is valid for 10 minutes after the first request that + // uses it is completed. After 10 minutes, any request with the same client token + // is treated as a new request. Do not resubmit the same request with the same + // client token for more than 10 minutes, or the result might not be idempotent. + // + // If you submit a request with the same client token but a change in other + // parameters within the 10-minute idempotency window, DynamoDB returns an // IdempotentParameterMismatch exception. ClientRequestToken *string // Determines the level of detail about either provisioned or on-demand throughput // consumption that is returned in the response: + // // - INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary index - // that was accessed. 
Note that some operations, such as GetItem and BatchGetItem - // , do not access any indexes at all. In these cases, specifying INDEXES will - // only return ConsumedCapacity information for table(s). + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem , do not access any + // indexes at all. In these cases, specifying INDEXES will only return + // ConsumedCapacity information for table(s). + // // - TOTAL - The response includes only the aggregate ConsumedCapacity for the // operation. + // // - NONE - No ConsumedCapacity details are included in the response. ReturnConsumedCapacity types.ReturnConsumedCapacity @@ -191,6 +211,12 @@ func (c *Client) addOperationTransactWriteItemsMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opTransactWriteItemsMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UntagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UntagResource.go index 03d7cc90656..d24b12bc9f8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UntagResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UntagResource.go @@ -12,9 +12,12 @@ import ( ) // Removes the association of tags from an Amazon DynamoDB resource. You can call -// UntagResource up to five times per second, per account. For an overview on -// tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html) -// in the Amazon DynamoDB Developer Guide. +// UntagResource up to five times per second, per account. +// +// For an overview on tagging DynamoDB resources, see [Tagging for DynamoDB] in the Amazon DynamoDB +// Developer Guide. +// +// [Tagging for DynamoDB]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html func (c *Client) UntagResource(ctx context.Context, params *UntagResourceInput, optFns ...func(*Options)) (*UntagResourceOutput, error) { if params == nil { params = &UntagResourceInput{} @@ -112,6 +115,12 @@ func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, o if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpUntagResourceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateContinuousBackups.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateContinuousBackups.go index 8ced17d6b31..ef37cf18cee 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateContinuousBackups.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateContinuousBackups.go @@ -16,11 +16,14 @@ import ( // specified table. A successful UpdateContinuousBackups call returns the current // ContinuousBackupsDescription . Continuous backups are ENABLED on all tables at // table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus -// will be set to ENABLED. 
Once continuous backups and point in time recovery are -// enabled, you can restore to any point in time within EarliestRestorableDateTime -// and LatestRestorableDateTime . LatestRestorableDateTime is typically 5 minutes -// before the current time. You can restore your table to any point in time during -// the last 35 days. +// will be set to ENABLED. +// +// Once continuous backups and point in time recovery are enabled, you can restore +// to any point in time within EarliestRestorableDateTime and +// LatestRestorableDateTime . +// +// LatestRestorableDateTime is typically 5 minutes before the current time. You +// can restore your table to any point in time during the last 35 days. func (c *Client) UpdateContinuousBackups(ctx context.Context, params *UpdateContinuousBackupsInput, optFns ...func(*Options)) (*UpdateContinuousBackupsOutput, error) { if params == nil { params = &UpdateContinuousBackupsInput{} @@ -122,6 +125,12 @@ func (c *Client) addOperationUpdateContinuousBackupsMiddlewares(stack *middlewar if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpUpdateContinuousBackupsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateContributorInsights.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateContributorInsights.go index 403303e7a94..83cb7e083bf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateContributorInsights.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateContributorInsights.go @@ -124,6 +124,12 @@ func (c *Client) addOperationUpdateContributorInsightsMiddlewares(stack *middlew if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpUpdateContributorInsightsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateGlobalTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateGlobalTable.go index 767af7c3f3a..d90acdf7a37 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateGlobalTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateGlobalTable.go @@ -16,25 +16,40 @@ import ( // already exist to be able to use this operation. Any replica to be added must be // empty, have the same name as the global table, have the same key schema, have // DynamoDB Streams enabled, and have the same provisioned and maximum write -// capacity units. This operation only applies to Version 2017.11.29 (Legacy) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) -// of global tables. We recommend using Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// when creating new global tables, as it provides greater flexibility, higher -// efficiency and consumes less write capacity than 2017.11.29 (Legacy). To -// determine which version you are using, see Determining the version (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html) -// . 
To update existing global tables from version 2017.11.29 (Legacy) to version -// 2019.11.21 (Current), see Updating global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html) -// . This operation only applies to Version 2017.11.29 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) -// of global tables. If you are using global tables Version 2019.11.21 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// you can use UpdateTable (https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateTable.html) -// instead. Although you can use UpdateGlobalTable to add replicas and remove -// replicas in a single request, for simplicity we recommend that you issue -// separate requests for adding or removing replicas. If global secondary indexes -// are specified, then the following conditions must also be met: +// capacity units. +// +// This documentation is for version 2017.11.29 (Legacy) of global tables, which +// should be avoided for new global tables. Customers should use [Global Tables version 2019.11.21 (Current)]when possible, +// because it provides greater flexibility, higher efficiency, and consumes less +// write capacity than 2017.11.29 (Legacy). +// +// To determine which version you're using, see [Determining the global table version you are using]. To update existing global tables +// from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see [Upgrading global tables]. +// +// For global tables, this operation only applies to global tables using Version +// 2019.11.21 (Current version). If you are using global tables [Version 2019.11.21]you can use [UpdateTable] +// instead. +// +// Although you can use UpdateGlobalTable to add replicas and remove replicas in a +// single request, for simplicity we recommend that you issue separate requests for +// adding or removing replicas. +// +// If global secondary indexes are specified, then the following conditions must +// also be met: +// // - The global secondary indexes must have the same name. +// // - The global secondary indexes must have the same hash key and sort key (if // present). +// // - The global secondary indexes must have the same provisioned and maximum // write capacity units. 
+// +// [UpdateTable]: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateTable.html +// [Global Tables version 2019.11.21 (Current)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html +// [Upgrading global tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html +// [Version 2019.11.21]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html +// [Determining the global table version you are using]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html func (c *Client) UpdateGlobalTable(ctx context.Context, params *UpdateGlobalTableInput, optFns ...func(*Options)) (*UpdateGlobalTableOutput, error) { if params == nil { params = &UpdateGlobalTableInput{} @@ -134,6 +149,12 @@ func (c *Client) addOperationUpdateGlobalTableMiddlewares(stack *middleware.Stac if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpUpdateGlobalTableValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateGlobalTableSettings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateGlobalTableSettings.go index 2b8b9c1ba59..94616f2f602 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateGlobalTableSettings.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateGlobalTableSettings.go @@ -12,15 +12,19 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Updates settings for a global table. This operation only applies to Version -// 2017.11.29 (Legacy) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) -// of global tables. We recommend using Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// when creating new global tables, as it provides greater flexibility, higher -// efficiency and consumes less write capacity than 2017.11.29 (Legacy). To -// determine which version you are using, see Determining the version (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html) -// . To update existing global tables from version 2017.11.29 (Legacy) to version -// 2019.11.21 (Current), see Updating global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html) -// . +// Updates settings for a global table. +// +// This documentation is for version 2017.11.29 (Legacy) of global tables, which +// should be avoided for new global tables. Customers should use [Global Tables version 2019.11.21 (Current)]when possible, +// because it provides greater flexibility, higher efficiency, and consumes less +// write capacity than 2017.11.29 (Legacy). +// +// To determine which version you're using, see [Determining the global table version you are using]. To update existing global tables +// from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see [Upgrading global tables]. 
+// +// [Global Tables version 2019.11.21 (Current)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html +// [Upgrading global tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html +// [Determining the global table version you are using]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html func (c *Client) UpdateGlobalTableSettings(ctx context.Context, params *UpdateGlobalTableSettingsInput, optFns ...func(*Options)) (*UpdateGlobalTableSettingsOutput, error) { if params == nil { params = &UpdateGlobalTableSettingsInput{} @@ -45,12 +49,15 @@ type UpdateGlobalTableSettingsInput struct { // The billing mode of the global table. If GlobalTableBillingMode is not // specified, the global table defaults to PROVISIONED capacity billing mode. + // // - PROVISIONED - We recommend using PROVISIONED for predictable workloads. - // PROVISIONED sets the billing mode to Provisioned Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual) - // . + // PROVISIONED sets the billing mode to [Provisioned capacity mode]. + // // - PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable - // workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand) - // . + // workloads. PAY_PER_REQUEST sets the billing mode to [On-demand capacity mode]. + // + // [Provisioned capacity mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html + // [On-demand capacity mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/on-demand-capacity-mode.html GlobalTableBillingMode types.BillingMode // Represents the settings of a global secondary index for a global table that @@ -143,6 +150,12 @@ func (c *Client) addOperationUpdateGlobalTableSettingsMiddlewares(stack *middlew if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpUpdateGlobalTableSettingsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateItem.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateItem.go index 12b11218b34..616ec73f349 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateItem.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateItem.go @@ -16,9 +16,10 @@ import ( // not already exist. You can put, delete, or add attribute values. You can also // perform a conditional update on an existing item (insert a new attribute // name-value pair if it doesn't exist, or replace an existing name-value pair if -// it has certain expected attribute values). You can also return the item's -// attribute values in the same UpdateItem operation using the ReturnValues -// parameter. +// it has certain expected attribute values). +// +// You can also return the item's attribute values in the same UpdateItem +// operation using the ReturnValues parameter. 
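
A short sketch of the conditional update described above, assuming a hypothetical "products" table keyed on "pk": the Price attribute is replaced only while ProductStatus is still "Available", and ReturnValues asks for the item as it appears after the update. The table, key, and attribute names are illustrative only.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

// updatePriceIfAvailable replaces the Price attribute of one item, but only
// while its ProductStatus is still "Available"; otherwise DynamoDB returns a
// ConditionalCheckFailedException and the item is left untouched.
func updatePriceIfAvailable(ctx context.Context, client *dynamodb.Client, id, newPrice string) (map[string]types.AttributeValue, error) {
	out, err := client.UpdateItem(ctx, &dynamodb.UpdateItemInput{
		TableName: aws.String("products"), // hypothetical table
		Key: map[string]types.AttributeValue{
			"pk": &types.AttributeValueMemberS{Value: id},
		},
		UpdateExpression:    aws.String("SET Price = :p"),
		ConditionExpression: aws.String("ProductStatus = :avail"),
		ExpressionAttributeValues: map[string]types.AttributeValue{
			":p":     &types.AttributeValueMemberN{Value: newPrice},
			":avail": &types.AttributeValueMemberS{Value: "Available"},
		},
		// Return the full item as it appears after the update.
		ReturnValues: types.ReturnValueAllNew,
	})
	if err != nil {
		return nil, err
	}
	return out.Attributes, nil
}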
func (c *Client) UpdateItem(ctx context.Context, params *UpdateItemInput, optFns ...func(*Options)) (*UpdateItemOutput, error) { if params == nil { params = &UpdateItemInput{} @@ -38,10 +39,12 @@ func (c *Client) UpdateItem(ctx context.Context, params *UpdateItemInput, optFns type UpdateItemInput struct { // The primary key of the item to be updated. Each element consists of an - // attribute name and a value for that attribute. For the primary key, you must - // provide all of the attributes. For example, with a simple primary key, you only - // need to provide a value for the partition key. For a composite primary key, you - // must provide values for both the partition key and the sort key. + // attribute name and a value for that attribute. + // + // For the primary key, you must provide all of the attributes. For example, with + // a simple primary key, you only need to provide a value for the partition key. + // For a composite primary key, you must provide values for both the partition key + // and the sort key. // // This member is required. Key map[string]types.AttributeValue @@ -53,75 +56,116 @@ type UpdateItemInput struct { TableName *string // This is a legacy parameter. Use UpdateExpression instead. For more information, - // see AttributeUpdates (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributeUpdates.html) - // in the Amazon DynamoDB Developer Guide. + // see [AttributeUpdates]in the Amazon DynamoDB Developer Guide. + // + // [AttributeUpdates]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributeUpdates.html AttributeUpdates map[string]types.AttributeValueUpdate - // A condition that must be satisfied in order for a conditional update to - // succeed. An expression can contain any of the following: + // A condition that must be satisfied in order for a conditional update to succeed. + // + // An expression can contain any of the following: + // // - Functions: attribute_exists | attribute_not_exists | attribute_type | - // contains | begins_with | size These function names are case-sensitive. + // contains | begins_with | size + // + // These function names are case-sensitive. + // // - Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN + // // - Logical operators: AND | OR | NOT - // For more information about condition expressions, see Specifying Conditions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) - // in the Amazon DynamoDB Developer Guide. + // + // For more information about condition expressions, see [Specifying Conditions] in the Amazon DynamoDB + // Developer Guide. + // + // [Specifying Conditions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html ConditionExpression *string // This is a legacy parameter. Use ConditionExpression instead. For more - // information, see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html) - // in the Amazon DynamoDB Developer Guide. + // information, see [ConditionalOperator]in the Amazon DynamoDB Developer Guide. + // + // [ConditionalOperator]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html ConditionalOperator types.ConditionalOperator // This is a legacy parameter. Use ConditionExpression instead. 
For more - // information, see Expected (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html) - // in the Amazon DynamoDB Developer Guide. + // information, see [Expected]in the Amazon DynamoDB Developer Guide. + // + // [Expected]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html Expected map[string]types.ExpectedAttributeValue // One or more substitution tokens for attribute names in an expression. The // following are some use cases for using ExpressionAttributeNames : + // // - To access an attribute whose name conflicts with a DynamoDB reserved word. + // // - To create a placeholder for repeating occurrences of an attribute name in // an expression. + // // - To prevent special characters in an attribute name from being // misinterpreted in an expression. + // // Use the # character in an expression to dereference an attribute name. For // example, consider the following attribute name: + // // - Percentile + // // The name of this attribute conflicts with a reserved word, so it cannot be used - // directly in an expression. (For the complete list of reserved words, see - // Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) - // in the Amazon DynamoDB Developer Guide.) To work around this, you could specify - // the following for ExpressionAttributeNames : + // directly in an expression. (For the complete list of reserved words, see [Reserved Words]in the + // Amazon DynamoDB Developer Guide.) To work around this, you could specify the + // following for ExpressionAttributeNames : + // // - {"#P":"Percentile"} + // // You could then use this substitution in an expression, as in this example: + // // - #P = :val + // // Tokens that begin with the : character are expression attribute values, which - // are placeholders for the actual value at runtime. For more information about - // expression attribute names, see Specifying Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. + // are placeholders for the actual value at runtime. + // + // For more information about expression attribute names, see [Specifying Item Attributes] in the Amazon + // DynamoDB Developer Guide. + // + // [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html + // [Specifying Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html ExpressionAttributeNames map[string]string - // One or more values that can be substituted in an expression. Use the : (colon) - // character in an expression to dereference an attribute value. For example, - // suppose that you wanted to check whether the value of the ProductStatus - // attribute was one of the following: Available | Backordered | Discontinued You - // would first need to specify ExpressionAttributeValues as follows: { - // ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, - // ":disc":{"S":"Discontinued"} } You could then use these values in an expression, - // such as this: ProductStatus IN (:avail, :back, :disc) For more information on - // expression attribute values, see Condition Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) - // in the Amazon DynamoDB Developer Guide. 
+ // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute value. + // For example, suppose that you wanted to check whether the value of the + // ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, + // ":disc":{"S":"Discontinued"} } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see [Condition Expressions] in the Amazon + // DynamoDB Developer Guide. + // + // [Condition Expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html ExpressionAttributeValues map[string]types.AttributeValue // Determines the level of detail about either provisioned or on-demand throughput // consumption that is returned in the response: + // // - INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary index - // that was accessed. Note that some operations, such as GetItem and BatchGetItem - // , do not access any indexes at all. In these cases, specifying INDEXES will - // only return ConsumedCapacity information for table(s). + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem , do not access any + // indexes at all. In these cases, specifying INDEXES will only return + // ConsumedCapacity information for table(s). + // // - TOTAL - The response includes only the aggregate ConsumedCapacity for the // operation. + // // - NONE - No ConsumedCapacity details are included in the response. ReturnConsumedCapacity types.ReturnConsumedCapacity @@ -133,78 +177,110 @@ type UpdateItemInput struct { // Use ReturnValues if you want to get the item attributes as they appear before // or after they are successfully updated. For UpdateItem , the valid values are: + // // - NONE - If ReturnValues is not specified, or if its value is NONE , then // nothing is returned. (This setting is the default for ReturnValues .) + // // - ALL_OLD - Returns all of the attributes of the item, as they appeared before // the UpdateItem operation. + // // - UPDATED_OLD - Returns only the updated attributes, as they appeared before // the UpdateItem operation. + // // - ALL_NEW - Returns all of the attributes of the item, as they appear after // the UpdateItem operation. + // // - UPDATED_NEW - Returns only the updated attributes, as they appear after the // UpdateItem operation. + // // There is no additional cost associated with requesting a return value aside // from the small network and processing overhead of receiving a larger response. - // No read capacity units are consumed. The values returned are strongly - // consistent. + // No read capacity units are consumed. + // + // The values returned are strongly consistent. ReturnValues types.ReturnValue // An optional parameter that returns the item attributes for an UpdateItem - // operation that failed a condition check. There is no additional cost associated - // with requesting a return value aside from the small network and processing - // overhead of receiving a larger response. No read capacity units are consumed. + // operation that failed a condition check. 
+ // + // There is no additional cost associated with requesting a return value aside + // from the small network and processing overhead of receiving a larger response. + // No read capacity units are consumed. ReturnValuesOnConditionCheckFailure types.ReturnValuesOnConditionCheckFailure // An expression that defines one or more attributes to be updated, the action to - // be performed on them, and new values for them. The following action values are - // available for UpdateExpression . + // be performed on them, and new values for them. + // + // The following action values are available for UpdateExpression . + // // - SET - Adds one or more attributes and values to an item. If any of these // attributes already exist, they are replaced by the new values. You can also use // SET to add or subtract from an attribute that is of type Number. For example: - // SET myNum = myNum + :val SET supports the following functions: + // SET myNum = myNum + :val + // + // SET supports the following functions: + // // - if_not_exists (path, operand) - if the item does not contain an attribute at // the specified path, then if_not_exists evaluates to operand; otherwise, it // evaluates to path. You can use this function to avoid overwriting an attribute // that may already be present in the item. + // // - list_append (operand, operand) - evaluates to a list with a new element // added to it. You can append the new element to the start or the end of the list - // by reversing the order of the operands. These function names are - // case-sensitive. + // by reversing the order of the operands. + // + // These function names are case-sensitive. + // // - REMOVE - Removes one or more attributes from an item. + // // - ADD - Adds the specified value to the item, if the attribute does not // already exist. If the attribute does exist, then the behavior of ADD depends // on the data type of the attribute: + // // - If the existing attribute is a number, and if Value is also a number, then // Value is mathematically added to the existing attribute. If Value is a - // negative number, then it is subtracted from the existing attribute. If you use - // ADD to increment or decrement a number value for an item that doesn't exist - // before the update, DynamoDB uses 0 as the initial value. Similarly, if you use - // ADD for an existing item to increment or decrement an attribute value that - // doesn't exist before the update, DynamoDB uses 0 as the initial value. For - // example, suppose that the item you want to update doesn't have an attribute - // named itemcount , but you decide to ADD the number 3 to this attribute anyway. - // DynamoDB will create the itemcount attribute, set its initial value to 0 , and - // finally add 3 to it. The result will be a new itemcount attribute in the item, - // with a value of 3 . + // negative number, then it is subtracted from the existing attribute. + // + // If you use ADD to increment or decrement a number value for an item that doesn't + // exist before the update, DynamoDB uses 0 as the initial value. + // + // Similarly, if you use ADD for an existing item to increment or decrement an + // attribute value that doesn't exist before the update, DynamoDB uses 0 as the + // initial value. For example, suppose that the item you want to update doesn't + // have an attribute named itemcount , but you decide to ADD the number 3 to this + // attribute anyway. DynamoDB will create the itemcount attribute, set its + // initial value to 0 , and finally add 3 to it. 
The result will be a new + // itemcount attribute in the item, with a value of 3 . + // // - If the existing data type is a set and if Value is also a set, then Value is // added to the existing set. For example, if the attribute value is the set // [1,2] , and the ADD action specified [3] , then the final attribute value is // [1,2,3] . An error occurs if an ADD action is specified for a set attribute - // and the attribute type specified does not match the existing set type. Both sets - // must have the same primitive data type. For example, if the existing data type - // is a set of strings, the Value must also be a set of strings. The ADD action - // only supports Number and set data types. In addition, ADD can only be used on - // top-level attributes, not nested attributes. - // - DELETE - Deletes an element from a set. If a set of values is specified, - // then those values are subtracted from the old set. For example, if the attribute - // value was the set [a,b,c] and the DELETE action specifies [a,c] , then the - // final attribute value is [b] . Specifying an empty set is an error. The DELETE - // action only supports set data types. In addition, DELETE can only be used on - // top-level attributes, not nested attributes. + // and the attribute type specified does not match the existing set type. + // + // Both sets must have the same primitive data type. For example, if the existing + // data type is a set of strings, the Value must also be a set of strings. + // + // The ADD action only supports Number and set data types. In addition, ADD can + // only be used on top-level attributes, not nested attributes. + // + // - DELETE - Deletes an element from a set. + // + // If a set of values is specified, then those values are subtracted from the old + // set. For example, if the attribute value was the set [a,b,c] and the DELETE + // action specifies [a,c] , then the final attribute value is [b] . Specifying an + // empty set is an error. + // + // The DELETE action only supports set data types. In addition, DELETE can only be + // used on top-level attributes, not nested attributes. + // // You can have many actions in a single expression, such as the following: SET - // a=:value1, b=:value2 DELETE :value3, :value4, :value5 For more information on - // update expressions, see Modifying Items and Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.Modifying.html) - // in the Amazon DynamoDB Developer Guide. + // a=:value1, b=:value2 DELETE :value3, :value4, :value5 + // + // For more information on update expressions, see [Modifying Items and Attributes] in the Amazon DynamoDB + // Developer Guide. + // + // [Modifying Items and Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.Modifying.html UpdateExpression *string noSmithyDocumentSerde @@ -214,33 +290,41 @@ type UpdateItemInput struct { type UpdateItemOutput struct { // A map of attribute values as they appear before or after the UpdateItem - // operation, as determined by the ReturnValues parameter. The Attributes map is - // only present if the update was successful and ReturnValues was specified as - // something other than NONE in the request. Each element represents one attribute. + // operation, as determined by the ReturnValues parameter. + // + // The Attributes map is only present if the update was successful and ReturnValues + // was specified as something other than NONE in the request. Each element + // represents one attribute. 
Attributes map[string]types.AttributeValue // The capacity units consumed by the UpdateItem operation. The data returned // includes the total provisioned throughput consumed, along with statistics for // the table and any indexes involved in the operation. ConsumedCapacity is only // returned if the ReturnConsumedCapacity parameter was specified. For more - // information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html#ItemSizeCalculations.Reads) - // in the Amazon DynamoDB Developer Guide. + // information, see [Capacity unity consumption for write operations]in the Amazon DynamoDB Developer Guide. + // + // [Capacity unity consumption for write operations]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#write-operation-consumption ConsumedCapacity *types.ConsumedCapacity // Information about item collections, if any, that were affected by the UpdateItem // operation. ItemCollectionMetrics is only returned if the // ReturnItemCollectionMetrics parameter was specified. If the table does not have // any local secondary indexes, this information is not returned in the response. + // // Each ItemCollectionMetrics element consists of: + // // - ItemCollectionKey - The partition key value of the item collection. This is // the same as the partition key value of the item itself. + // // - SizeEstimateRangeGB - An estimate of item collection size, in gigabytes. // This value is a two-element array containing a lower bound and an upper bound // for the estimate. The estimate includes the size of all the items in the table, // plus the size of all attributes projected into all of the local secondary // indexes on that table. Use this estimate to measure whether a local secondary - // index is approaching its size limit. The estimate is subject to change over - // time; therefore, do not rely on the precision or accuracy of the estimate. + // index is approaching its size limit. + // + // The estimate is subject to change over time; therefore, do not rely on the + // precision or accuracy of the estimate. ItemCollectionMetrics *types.ItemCollectionMetrics // Metadata pertaining to the operation's result. 
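
The ADD semantics and the output fields documented above are easiest to see in a small end-to-end call. The sketch below assumes a hypothetical "counters" table keyed on "pk"; it increments a numeric itemcount attribute with ADD (a missing attribute starts from 0, as noted above) and then reads the updated attributes and consumed capacity back from UpdateItemOutput.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := dynamodb.NewFromConfig(cfg)

	// ADD treats a missing itemcount attribute as 0, so this call both creates
	// and increments the counter; UPDATED_NEW returns only the changed attributes.
	out, err := client.UpdateItem(context.TODO(), &dynamodb.UpdateItemInput{
		TableName: aws.String("counters"), // hypothetical table and key names
		Key: map[string]types.AttributeValue{
			"pk": &types.AttributeValueMemberS{Value: "page#home"},
		},
		UpdateExpression: aws.String("ADD itemcount :inc"),
		ExpressionAttributeValues: map[string]types.AttributeValue{
			":inc": &types.AttributeValueMemberN{Value: "3"},
		},
		ReturnValues:           types.ReturnValueUpdatedNew,
		ReturnConsumedCapacity: types.ReturnConsumedCapacityTotal,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Attributes is only present because ReturnValues was not NONE.
	if n, ok := out.Attributes["itemcount"].(*types.AttributeValueMemberN); ok {
		fmt.Println("itemcount is now", n.Value)
	}
	// ConsumedCapacity is only present because ReturnConsumedCapacity was set.
	if out.ConsumedCapacity != nil && out.ConsumedCapacity.CapacityUnits != nil {
		fmt.Println("consumed capacity units:", *out.ConsumedCapacity.CapacityUnits)
	}
}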
@@ -307,6 +391,12 @@ func (c *Client) addOperationUpdateItemMiddlewares(stack *middleware.Stack, opti if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpUpdateItemValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateKinesisStreamingDestination.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateKinesisStreamingDestination.go index 3d2272ad7f5..583fc2000b0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateKinesisStreamingDestination.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateKinesisStreamingDestination.go @@ -125,6 +125,12 @@ func (c *Client) addOperationUpdateKinesisStreamingDestinationMiddlewares(stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpUpdateKinesisStreamingDestinationValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTable.go index a51de71d5a2..0d436352450 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTable.go @@ -13,11 +13,17 @@ import ( ) // Modifies the provisioned throughput settings, global secondary indexes, or -// DynamoDB Streams settings for a given table. This operation only applies to -// Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// of global tables. You can only perform one of the following operations at once: +// DynamoDB Streams settings for a given table. +// +// For global tables, this operation only applies to global tables using Version +// 2019.11.21 (Current version). +// +// You can only perform one of the following operations at once: +// // - Modify the provisioned throughput settings of the table. +// // - Remove a global secondary index from the table. +// // - Create a new global secondary index on the table. After the index begins // backfilling, you can use UpdateTable to perform other operations. // @@ -59,12 +65,15 @@ type UpdateTableInput struct { // provisioned capacity values must be set. The initial provisioned capacity values // are estimated based on the consumed read and write capacity of your table and // global secondary indexes over the past 30 minutes. + // // - PROVISIONED - We recommend using PROVISIONED for predictable workloads. - // PROVISIONED sets the billing mode to Provisioned Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual) - // . + // PROVISIONED sets the billing mode to [Provisioned capacity mode]. + // // - PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable - // workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand) - // . + // workloads. 
PAY_PER_REQUEST sets the billing mode to [On-demand capacity mode]. + // + // [Provisioned capacity mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html + // [On-demand capacity mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/on-demand-capacity-mode.html BillingMode types.BillingMode // Indicates whether deletion protection is to be enabled (true) or disabled @@ -73,29 +82,44 @@ type UpdateTableInput struct { // An array of one or more global secondary indexes for the table. For each index // in the array, you can request one action: + // // - Create - add a new global secondary index to the table. + // // - Update - modify the provisioned throughput settings of an existing global // secondary index. + // // - Delete - remove a global secondary index from the table. + // // You can create or delete only one global secondary index per UpdateTable - // operation. For more information, see Managing Global Secondary Indexes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GSI.OnlineOps.html) - // in the Amazon DynamoDB Developer Guide. + // operation. + // + // For more information, see [Managing Global Secondary Indexes] in the Amazon DynamoDB Developer Guide. + // + // [Managing Global Secondary Indexes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GSI.OnlineOps.html GlobalSecondaryIndexUpdates []types.GlobalSecondaryIndexUpdate + // Updates the maximum number of read and write units for the specified table in + // on-demand capacity mode. If you use this parameter, you must specify + // MaxReadRequestUnits , MaxWriteRequestUnits , or both. + OnDemandThroughput *types.OnDemandThroughput + // The new provisioned throughput settings for the specified table or index. ProvisionedThroughput *types.ProvisionedThroughput // A list of replica update actions (create, delete, or update) for the table. - // This property only applies to Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) - // of global tables. + // + // For global tables, this property only applies to global tables using Version + // 2019.11.21 (Current version). ReplicaUpdates []types.ReplicationGroupUpdate // The new server-side encryption settings for the specified table. SSESpecification *types.SSESpecification - // Represents the DynamoDB Streams configuration for the table. You receive a - // ValidationException if you try to enable a stream on a table that already has a - // stream, or if you try to disable a stream on a table that doesn't have a stream. + // Represents the DynamoDB Streams configuration for the table. + // + // You receive a ValidationException if you try to enable a stream on a table that + // already has a stream, or if you try to disable a stream on a table that doesn't + // have a stream. StreamSpecification *types.StreamSpecification // The table class of the table to be updated. 
Valid values are STANDARD and @@ -175,6 +199,12 @@ func (c *Client) addOperationUpdateTableMiddlewares(stack *middleware.Stack, opt if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpUpdateTableValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTableReplicaAutoScaling.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTableReplicaAutoScaling.go index ff84b122e75..e22377680bd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTableReplicaAutoScaling.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTableReplicaAutoScaling.go @@ -11,9 +11,10 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Updates auto scaling settings on your global tables at once. This operation -// only applies to Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// of global tables. +// Updates auto scaling settings on your global tables at once. +// +// For global tables, this operation only applies to global tables using Version +// 2019.11.21 (Current version). func (c *Client) UpdateTableReplicaAutoScaling(ctx context.Context, params *UpdateTableReplicaAutoScalingInput, optFns ...func(*Options)) (*UpdateTableReplicaAutoScalingOutput, error) { if params == nil { params = &UpdateTableReplicaAutoScalingInput{} @@ -118,6 +119,12 @@ func (c *Client) addOperationUpdateTableReplicaAutoScalingMiddlewares(stack *mid if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpUpdateTableReplicaAutoScalingValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTimeToLive.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTimeToLive.go index e6f79845416..3a044e4245b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTimeToLive.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTimeToLive.go @@ -16,20 +16,31 @@ import ( // specified table. A successful UpdateTimeToLive call returns the current // TimeToLiveSpecification . It can take up to one hour for the change to fully // process. Any additional UpdateTimeToLive calls for the same table during this -// one hour duration result in a ValidationException . TTL compares the current -// time in epoch time format to the time stored in the TTL attribute of an item. If -// the epoch time value stored in the attribute is less than the current time, the -// item is marked as expired and subsequently deleted. The epoch time format is the -// number of seconds elapsed since 12:00:00 AM January 1, 1970 UTC. DynamoDB -// deletes expired items on a best-effort basis to ensure availability of -// throughput for other data operations. DynamoDB typically deletes expired items -// within two days of expiration. The exact duration within which an item gets -// deleted after expiration is specific to the nature of the workload. 
Items that -// have expired and not been deleted will still show up in reads, queries, and -// scans. As items are deleted, they are removed from any local secondary index and +// one hour duration result in a ValidationException . +// +// TTL compares the current time in epoch time format to the time stored in the +// TTL attribute of an item. If the epoch time value stored in the attribute is +// less than the current time, the item is marked as expired and subsequently +// deleted. +// +// The epoch time format is the number of seconds elapsed since 12:00:00 AM +// January 1, 1970 UTC. +// +// DynamoDB deletes expired items on a best-effort basis to ensure availability of +// throughput for other data operations. +// +// DynamoDB typically deletes expired items within two days of expiration. The +// exact duration within which an item gets deleted after expiration is specific to +// the nature of the workload. Items that have expired and not been deleted will +// still show up in reads, queries, and scans. +// +// As items are deleted, they are removed from any local secondary index and // global secondary index immediately in the same eventually consistent way as a -// standard delete operation. For more information, see Time To Live (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/TTL.html) -// in the Amazon DynamoDB Developer Guide. +// standard delete operation. +// +// For more information, see [Time To Live] in the Amazon DynamoDB Developer Guide. +// +// [Time To Live]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/TTL.html func (c *Client) UpdateTimeToLive(ctx context.Context, params *UpdateTimeToLiveInput, optFns ...func(*Options)) (*UpdateTimeToLiveOutput, error) { if params == nil { params = &UpdateTimeToLiveInput{} @@ -132,6 +143,12 @@ func (c *Client) addOperationUpdateTimeToLiveMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpUpdateTimeToLiveValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/auth.go index 817a2851839..4e3363f5033 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/auth.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/auth.go @@ -12,7 +12,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -func bindAuthParamsRegion(params *AuthResolverParameters, _ interface{}, options Options) { +func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { params.Region = options.Region } @@ -90,12 +90,12 @@ type AuthResolverParameters struct { Region string } -func bindAuthResolverParams(operation string, input interface{}, options Options) *AuthResolverParameters { +func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { params := &AuthResolverParameters{ Operation: operation, } - bindAuthParamsRegion(params, input, options) + bindAuthParamsRegion(ctx, params, input, options) return params } @@ -145,7 +145,7 @@ func (*resolveAuthSchemeMiddleware) ID() string { func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out 
middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - params := bindAuthResolverParams(m.operation, getOperationInput(ctx), m.options) + params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) if err != nil { return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/deserializers.go index be3f239b434..7d35718602a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/deserializers.go @@ -20,8 +20,17 @@ import ( "io/ioutil" "math" "strings" + "time" ) +func deserializeS3Expires(v string) (*time.Time, error) { + t, err := smithytime.ParseHTTPDate(v) + if err != nil { + return nil, nil + } + return &t, nil +} + type awsAwsjson10_deserializeOpBatchExecuteStatement struct { } @@ -10628,6 +10637,11 @@ func awsAwsjson10_deserializeDocumentGlobalSecondaryIndex(v **types.GlobalSecond return err } + case "OnDemandThroughput": + if err := awsAwsjson10_deserializeDocumentOnDemandThroughput(&sv.OnDemandThroughput, value); err != nil { + return err + } + case "Projection": if err := awsAwsjson10_deserializeDocumentProjection(&sv.Projection, value); err != nil { return err @@ -10736,6 +10750,11 @@ func awsAwsjson10_deserializeDocumentGlobalSecondaryIndexDescription(v **types.G return err } + case "OnDemandThroughput": + if err := awsAwsjson10_deserializeDocumentOnDemandThroughput(&sv.OnDemandThroughput, value); err != nil { + return err + } + case "Projection": if err := awsAwsjson10_deserializeDocumentProjection(&sv.Projection, value); err != nil { return err @@ -10859,6 +10878,11 @@ func awsAwsjson10_deserializeDocumentGlobalSecondaryIndexInfo(v **types.GlobalSe return err } + case "OnDemandThroughput": + if err := awsAwsjson10_deserializeDocumentOnDemandThroughput(&sv.OnDemandThroughput, value); err != nil { + return err + } + case "Projection": if err := awsAwsjson10_deserializeDocumentProjection(&sv.Projection, value); err != nil { return err @@ -12986,6 +13010,107 @@ func awsAwsjson10_deserializeDocumentNumberSetAttributeValue(v *[]string, value return nil } +func awsAwsjson10_deserializeDocumentOnDemandThroughput(v **types.OnDemandThroughput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.OnDemandThroughput + if *v == nil { + sv = &types.OnDemandThroughput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "MaxReadRequestUnits": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected LongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.MaxReadRequestUnits = ptr.Int64(i64) + } + + case "MaxWriteRequestUnits": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected LongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.MaxWriteRequestUnits = ptr.Int64(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func 
awsAwsjson10_deserializeDocumentOnDemandThroughputOverride(v **types.OnDemandThroughputOverride, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.OnDemandThroughputOverride + if *v == nil { + sv = &types.OnDemandThroughputOverride{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "MaxReadRequestUnits": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected LongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.MaxReadRequestUnits = ptr.Int64(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson10_deserializeDocumentPartiQLBatchResponse(v *[]types.BatchStatementResponse, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -13744,6 +13869,11 @@ func awsAwsjson10_deserializeDocumentReplicaDescription(v **types.ReplicaDescrip sv.KMSMasterKeyId = ptr.String(jtv) } + case "OnDemandThroughputOverride": + if err := awsAwsjson10_deserializeDocumentOnDemandThroughputOverride(&sv.OnDemandThroughputOverride, value); err != nil { + return err + } + case "ProvisionedThroughputOverride": if err := awsAwsjson10_deserializeDocumentProvisionedThroughputOverride(&sv.ProvisionedThroughputOverride, value); err != nil { return err @@ -13973,6 +14103,11 @@ func awsAwsjson10_deserializeDocumentReplicaGlobalSecondaryIndexDescription(v ** sv.IndexName = ptr.String(jtv) } + case "OnDemandThroughputOverride": + if err := awsAwsjson10_deserializeDocumentOnDemandThroughputOverride(&sv.OnDemandThroughputOverride, value); err != nil { + return err + } + case "ProvisionedThroughputOverride": if err := awsAwsjson10_deserializeDocumentProvisionedThroughputOverride(&sv.ProvisionedThroughputOverride, value); err != nil { return err @@ -14684,6 +14819,11 @@ func awsAwsjson10_deserializeDocumentSourceTableDetails(v **types.SourceTableDet return err } + case "OnDemandThroughput": + if err := awsAwsjson10_deserializeDocumentOnDemandThroughput(&sv.OnDemandThroughput, value); err != nil { + return err + } + case "ProvisionedThroughput": if err := awsAwsjson10_deserializeDocumentProvisionedThroughput(&sv.ProvisionedThroughput, value); err != nil { return err @@ -15223,6 +15363,11 @@ func awsAwsjson10_deserializeDocumentTableCreationParameters(v **types.TableCrea return err } + case "OnDemandThroughput": + if err := awsAwsjson10_deserializeDocumentOnDemandThroughput(&sv.OnDemandThroughput, value); err != nil { + return err + } + case "ProvisionedThroughput": if err := awsAwsjson10_deserializeDocumentProvisionedThroughput(&sv.ProvisionedThroughput, value); err != nil { return err @@ -15368,6 +15513,11 @@ func awsAwsjson10_deserializeDocumentTableDescription(v **types.TableDescription return err } + case "OnDemandThroughput": + if err := awsAwsjson10_deserializeDocumentOnDemandThroughput(&sv.OnDemandThroughput, value); err != nil { + return err + } + case "ProvisionedThroughput": if err := awsAwsjson10_deserializeDocumentProvisionedThroughputDescription(&sv.ProvisionedThroughput, value); err != nil { return err diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/doc.go index 45ddcae39bd..53f36085a57 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/doc.go @@ -3,19 +3,24 @@ // Package dynamodb provides the API client, operations, and parameter types for // Amazon DynamoDB. // -// Amazon DynamoDB Amazon DynamoDB is a fully managed NoSQL database service that -// provides fast and predictable performance with seamless scalability. DynamoDB -// lets you offload the administrative burdens of operating and scaling a -// distributed database, so that you don't have to worry about hardware -// provisioning, setup and configuration, replication, software patching, or -// cluster scaling. With DynamoDB, you can create database tables that can store -// and retrieve any amount of data, and serve any level of request traffic. You can -// scale up or scale down your tables' throughput capacity without downtime or -// performance degradation, and use the Amazon Web Services Management Console to -// monitor resource utilization and performance metrics. DynamoDB automatically -// spreads the data and traffic for your tables over a sufficient number of servers -// to handle your throughput and storage requirements, while maintaining consistent -// and fast performance. All of your data is stored on solid state disks (SSDs) and -// automatically replicated across multiple Availability Zones in an Amazon Web -// Services Region, providing built-in high availability and data durability. +// # Amazon DynamoDB +// +// Amazon DynamoDB is a fully managed NoSQL database service that provides fast +// and predictable performance with seamless scalability. DynamoDB lets you offload +// the administrative burdens of operating and scaling a distributed database, so +// that you don't have to worry about hardware provisioning, setup and +// configuration, replication, software patching, or cluster scaling. +// +// With DynamoDB, you can create database tables that can store and retrieve any +// amount of data, and serve any level of request traffic. You can scale up or +// scale down your tables' throughput capacity without downtime or performance +// degradation, and use the Amazon Web Services Management Console to monitor +// resource utilization and performance metrics. +// +// DynamoDB automatically spreads the data and traffic for your tables over a +// sufficient number of servers to handle your throughput and storage requirements, +// while maintaining consistent and fast performance. All of your data is stored on +// solid state disks (SSDs) and automatically replicated across multiple +// Availability Zones in an Amazon Web Services Region, providing built-in high +// availability and data durability. package dynamodb diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/endpoints.go index 78ad773153f..5c948ddff7b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/endpoints.go @@ -289,6 +289,17 @@ func (p EndpointParameters) WithDefaults() EndpointParameters { return p } +type stringSlice []string + +func (s stringSlice) Get(i int) *string { + if i < 0 || i >= len(s) { + return nil + } + + v := s[i] + return &v +} + // EndpointResolverV2 provides the interface for resolving service endpoints. 
type EndpointResolverV2 interface { // ResolveEndpoint attempts to resolve the endpoint with the provided options, @@ -496,7 +507,7 @@ type endpointParamsBinder interface { bindEndpointParams(*EndpointParameters) } -func bindEndpointParams(input interface{}, options Options) *EndpointParameters { +func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { params := &EndpointParameters{} params.Region = bindRegion(options.Region) @@ -526,6 +537,10 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return next.HandleFinalize(ctx, in) } + if err := checkAccountID(getIdentity(ctx), m.options.AccountIDEndpointMode); err != nil { + return out, metadata, fmt.Errorf("invalid accountID set: %w", err) + } + req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) @@ -535,7 +550,7 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") } - params := bindEndpointParams(getOperationInput(ctx), m.options) + params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/go_module_metadata.go index b60959f52f7..dba2026c8dc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/go_module_metadata.go @@ -3,4 +3,4 @@ package dynamodb // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.31.1" +const goModuleVersion = "1.34.4" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/options.go index bf88e3b7635..4380a50265a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/options.go @@ -24,6 +24,9 @@ type Options struct { // modify this list for per operation behavior. APIOptions []func(*middleware.Stack) error + // Indicates how aws account ID is applied in endpoint2.0 routing + AccountIDEndpointMode aws.AccountIDEndpointMode + // The optional application specific identifier appended to the User-Agent header. AppID string @@ -61,8 +64,10 @@ type Options struct { // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a // value for this field will likely prevent you from using any endpoint-related // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom - // endpoint, set the client option BaseEndpoint instead. + // BaseEndpoint. + // + // To migrate an EndpointResolver implementation that uses a custom endpoint, set + // the client option BaseEndpoint instead. EndpointResolver EndpointResolver // Resolves the endpoint used for a particular service operation. This should be @@ -85,17 +90,20 @@ type Options struct { // RetryMaxAttempts specifies the maximum number attempts an API client will call // an operation that fails with a retryable error. 
A value of 0 is ignored, and // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. If specified in an operation call's - // functional options with a value that is different than the constructed client's - // Options, the Client's Retryer will be wrapped to use the operation's specific - // RetryMaxAttempts value. + // per operation call's retry max attempts. + // + // If specified in an operation call's functional options with a value that is + // different than the constructed client's Options, the Client's Retryer will be + // wrapped to use the operation's specific RetryMaxAttempts value. RetryMaxAttempts int // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. Currently does not support per operation call - // overrides, may in the future. + // Retryer option is not also specified. + // + // When creating a new API Clients this member will only be used if the Retryer + // Options member is nil. This value will be ignored if Retryer is not nil. + // + // Currently does not support per operation call overrides, may in the future. RetryMode aws.RetryMode // Retryer guides how HTTP requests should be retried in case of recoverable @@ -112,8 +120,9 @@ type Options struct { // The initial DefaultsMode used when the client options were constructed. If the // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. Currently does not support per operation call - // overrides, may in the future. + // value was at that point in time. + // + // Currently does not support per operation call overrides, may in the future. resolvedDefaultsMode aws.DefaultsMode // The HTTP client to invoke API calls with. Defaults to client's default HTTP @@ -158,6 +167,7 @@ func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { // Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for // this field will likely prevent you from using any endpoint-related service // features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// // To migrate an EndpointResolver implementation that uses a custom endpoint, set // the client option BaseEndpoint instead. 
func WithEndpointResolver(v EndpointResolver) func(*Options) { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/serializers.go index 0a5c1e813c9..eb9d1999bec 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/serializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/serializers.go @@ -3547,6 +3547,13 @@ func awsAwsjson10_serializeDocumentCreateGlobalSecondaryIndexAction(v *types.Cre } } + if v.OnDemandThroughput != nil { + ok := object.Key("OnDemandThroughput") + if err := awsAwsjson10_serializeDocumentOnDemandThroughput(v.OnDemandThroughput, ok); err != nil { + return err + } + } + if v.Projection != nil { ok := object.Key("Projection") if err := awsAwsjson10_serializeDocumentProjection(v.Projection, ok); err != nil { @@ -3592,6 +3599,13 @@ func awsAwsjson10_serializeDocumentCreateReplicationGroupMemberAction(v *types.C ok.String(*v.KMSMasterKeyId) } + if v.OnDemandThroughputOverride != nil { + ok := object.Key("OnDemandThroughputOverride") + if err := awsAwsjson10_serializeDocumentOnDemandThroughputOverride(v.OnDemandThroughputOverride, ok); err != nil { + return err + } + } + if v.ProvisionedThroughputOverride != nil { ok := object.Key("ProvisionedThroughputOverride") if err := awsAwsjson10_serializeDocumentProvisionedThroughputOverride(v.ProvisionedThroughputOverride, ok); err != nil { @@ -3880,6 +3894,13 @@ func awsAwsjson10_serializeDocumentGlobalSecondaryIndex(v *types.GlobalSecondary } } + if v.OnDemandThroughput != nil { + ok := object.Key("OnDemandThroughput") + if err := awsAwsjson10_serializeDocumentOnDemandThroughput(v.OnDemandThroughput, ok); err != nil { + return err + } + } + if v.Projection != nil { ok := object.Key("Projection") if err := awsAwsjson10_serializeDocumentProjection(v.Projection, ok); err != nil { @@ -4263,6 +4284,35 @@ func awsAwsjson10_serializeDocumentNumberSetAttributeValue(v []string, value smi return nil } +func awsAwsjson10_serializeDocumentOnDemandThroughput(v *types.OnDemandThroughput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxReadRequestUnits != nil { + ok := object.Key("MaxReadRequestUnits") + ok.Long(*v.MaxReadRequestUnits) + } + + if v.MaxWriteRequestUnits != nil { + ok := object.Key("MaxWriteRequestUnits") + ok.Long(*v.MaxWriteRequestUnits) + } + + return nil +} + +func awsAwsjson10_serializeDocumentOnDemandThroughputOverride(v *types.OnDemandThroughputOverride, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxReadRequestUnits != nil { + ok := object.Key("MaxReadRequestUnits") + ok.Long(*v.MaxReadRequestUnits) + } + + return nil +} + func awsAwsjson10_serializeDocumentParameterizedStatement(v *types.ParameterizedStatement, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -4522,6 +4572,13 @@ func awsAwsjson10_serializeDocumentReplicaGlobalSecondaryIndex(v *types.ReplicaG ok.String(*v.IndexName) } + if v.OnDemandThroughputOverride != nil { + ok := object.Key("OnDemandThroughputOverride") + if err := awsAwsjson10_serializeDocumentOnDemandThroughputOverride(v.OnDemandThroughputOverride, ok); err != nil { + return err + } + } + if v.ProvisionedThroughputOverride != nil { ok := object.Key("ProvisionedThroughputOverride") if err := awsAwsjson10_serializeDocumentProvisionedThroughputOverride(v.ProvisionedThroughputOverride, ok); err != nil { @@ -4853,6 +4910,13 @@ func 
awsAwsjson10_serializeDocumentTableCreationParameters(v *types.TableCreatio } } + if v.OnDemandThroughput != nil { + ok := object.Key("OnDemandThroughput") + if err := awsAwsjson10_serializeDocumentOnDemandThroughput(v.OnDemandThroughput, ok); err != nil { + return err + } + } + if v.ProvisionedThroughput != nil { ok := object.Key("ProvisionedThroughput") if err := awsAwsjson10_serializeDocumentProvisionedThroughput(v.ProvisionedThroughput, ok); err != nil { @@ -5065,6 +5129,13 @@ func awsAwsjson10_serializeDocumentUpdateGlobalSecondaryIndexAction(v *types.Upd ok.String(*v.IndexName) } + if v.OnDemandThroughput != nil { + ok := object.Key("OnDemandThroughput") + if err := awsAwsjson10_serializeDocumentOnDemandThroughput(v.OnDemandThroughput, ok); err != nil { + return err + } + } + if v.ProvisionedThroughput != nil { ok := object.Key("ProvisionedThroughput") if err := awsAwsjson10_serializeDocumentProvisionedThroughput(v.ProvisionedThroughput, ok); err != nil { @@ -5103,6 +5174,13 @@ func awsAwsjson10_serializeDocumentUpdateReplicationGroupMemberAction(v *types.U ok.String(*v.KMSMasterKeyId) } + if v.OnDemandThroughputOverride != nil { + ok := object.Key("OnDemandThroughputOverride") + if err := awsAwsjson10_serializeDocumentOnDemandThroughputOverride(v.OnDemandThroughputOverride, ok); err != nil { + return err + } + } + if v.ProvisionedThroughputOverride != nil { ok := object.Key("ProvisionedThroughputOverride") if err := awsAwsjson10_serializeDocumentProvisionedThroughputOverride(v.ProvisionedThroughputOverride, ok); err != nil { @@ -5297,6 +5375,13 @@ func awsAwsjson10_serializeOpDocumentCreateTableInput(v *CreateTableInput, value } } + if v.OnDemandThroughput != nil { + ok := object.Key("OnDemandThroughput") + if err := awsAwsjson10_serializeDocumentOnDemandThroughput(v.OnDemandThroughput, ok); err != nil { + return err + } + } + if v.ProvisionedThroughput != nil { ok := object.Key("ProvisionedThroughput") if err := awsAwsjson10_serializeDocumentProvisionedThroughput(v.ProvisionedThroughput, ok); err != nil { @@ -6276,6 +6361,13 @@ func awsAwsjson10_serializeOpDocumentRestoreTableFromBackupInput(v *RestoreTable } } + if v.OnDemandThroughputOverride != nil { + ok := object.Key("OnDemandThroughputOverride") + if err := awsAwsjson10_serializeDocumentOnDemandThroughput(v.OnDemandThroughputOverride, ok); err != nil { + return err + } + } + if v.ProvisionedThroughputOverride != nil { ok := object.Key("ProvisionedThroughputOverride") if err := awsAwsjson10_serializeDocumentProvisionedThroughput(v.ProvisionedThroughputOverride, ok); err != nil { @@ -6321,6 +6413,13 @@ func awsAwsjson10_serializeOpDocumentRestoreTableToPointInTimeInput(v *RestoreTa } } + if v.OnDemandThroughputOverride != nil { + ok := object.Key("OnDemandThroughputOverride") + if err := awsAwsjson10_serializeDocumentOnDemandThroughput(v.OnDemandThroughputOverride, ok); err != nil { + return err + } + } + if v.ProvisionedThroughputOverride != nil { ok := object.Key("ProvisionedThroughputOverride") if err := awsAwsjson10_serializeDocumentProvisionedThroughput(v.ProvisionedThroughputOverride, ok); err != nil { @@ -6783,6 +6882,13 @@ func awsAwsjson10_serializeOpDocumentUpdateTableInput(v *UpdateTableInput, value } } + if v.OnDemandThroughput != nil { + ok := object.Key("OnDemandThroughput") + if err := awsAwsjson10_serializeDocumentOnDemandThroughput(v.OnDemandThroughput, ok); err != nil { + return err + } + } + if v.ProvisionedThroughput != nil { ok := object.Key("ProvisionedThroughput") if err := 
awsAwsjson10_serializeDocumentProvisionedThroughput(v.ProvisionedThroughput, ok); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/enums.go index ec617f936bd..73e4795bc0f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/enums.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/enums.go @@ -12,8 +12,9 @@ const ( // Values returns all known values for ApproximateCreationDateTimePrecision. Note // that this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ApproximateCreationDateTimePrecision) Values() []ApproximateCreationDateTimePrecision { return []ApproximateCreationDateTimePrecision{ "MILLISECOND", @@ -31,8 +32,9 @@ const ( ) // Values returns all known values for AttributeAction. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (AttributeAction) Values() []AttributeAction { return []AttributeAction{ "ADD", @@ -51,8 +53,9 @@ const ( ) // Values returns all known values for BackupStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (BackupStatus) Values() []BackupStatus { return []BackupStatus{ "CREATING", @@ -71,8 +74,9 @@ const ( ) // Values returns all known values for BackupType. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (BackupType) Values() []BackupType { return []BackupType{ "USER", @@ -92,8 +96,9 @@ const ( ) // Values returns all known values for BackupTypeFilter. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (BackupTypeFilter) Values() []BackupTypeFilter { return []BackupTypeFilter{ "USER", @@ -122,6 +127,7 @@ const ( // Values returns all known values for BatchStatementErrorCodeEnum. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (BatchStatementErrorCodeEnum) Values() []BatchStatementErrorCodeEnum { return []BatchStatementErrorCodeEnum{ @@ -148,8 +154,9 @@ const ( ) // Values returns all known values for BillingMode. Note that this can be expanded -// in the future, and so it is only as up to date as the client. 
The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (BillingMode) Values() []BillingMode { return []BillingMode{ "PROVISIONED", @@ -177,8 +184,9 @@ const ( ) // Values returns all known values for ComparisonOperator. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ComparisonOperator) Values() []ComparisonOperator { return []ComparisonOperator{ "EQ", @@ -206,8 +214,9 @@ const ( ) // Values returns all known values for ConditionalOperator. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ConditionalOperator) Values() []ConditionalOperator { return []ConditionalOperator{ "AND", @@ -224,8 +233,9 @@ const ( ) // Values returns all known values for ContinuousBackupsStatus. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ContinuousBackupsStatus) Values() []ContinuousBackupsStatus { return []ContinuousBackupsStatus{ "ENABLED", @@ -243,6 +253,7 @@ const ( // Values returns all known values for ContributorInsightsAction. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (ContributorInsightsAction) Values() []ContributorInsightsAction { return []ContributorInsightsAction{ @@ -264,6 +275,7 @@ const ( // Values returns all known values for ContributorInsightsStatus. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (ContributorInsightsStatus) Values() []ContributorInsightsStatus { return []ContributorInsightsStatus{ @@ -288,8 +300,9 @@ const ( ) // Values returns all known values for DestinationStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (DestinationStatus) Values() []DestinationStatus { return []DestinationStatus{ "ENABLING", @@ -310,8 +323,9 @@ const ( ) // Values returns all known values for ExportFormat. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. 
+// +// The ordering of this slice is not guaranteed to be stable across updates. func (ExportFormat) Values() []ExportFormat { return []ExportFormat{ "DYNAMODB_JSON", @@ -329,8 +343,9 @@ const ( ) // Values returns all known values for ExportStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ExportStatus) Values() []ExportStatus { return []ExportStatus{ "IN_PROGRESS", @@ -348,8 +363,9 @@ const ( ) // Values returns all known values for ExportType. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ExportType) Values() []ExportType { return []ExportType{ "FULL_EXPORT", @@ -366,8 +382,9 @@ const ( ) // Values returns all known values for ExportViewType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ExportViewType) Values() []ExportViewType { return []ExportViewType{ "NEW_IMAGE", @@ -386,8 +403,9 @@ const ( ) // Values returns all known values for GlobalTableStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (GlobalTableStatus) Values() []GlobalTableStatus { return []GlobalTableStatus{ "CREATING", @@ -409,8 +427,9 @@ const ( ) // Values returns all known values for ImportStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ImportStatus) Values() []ImportStatus { return []ImportStatus{ "IN_PROGRESS", @@ -432,8 +451,9 @@ const ( ) // Values returns all known values for IndexStatus. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (IndexStatus) Values() []IndexStatus { return []IndexStatus{ "CREATING", @@ -453,8 +473,9 @@ const ( ) // Values returns all known values for InputCompressionType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. 
+// +// The ordering of this slice is not guaranteed to be stable across updates. func (InputCompressionType) Values() []InputCompressionType { return []InputCompressionType{ "GZIP", @@ -473,8 +494,9 @@ const ( ) // Values returns all known values for InputFormat. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (InputFormat) Values() []InputFormat { return []InputFormat{ "DYNAMODB_JSON", @@ -492,8 +514,9 @@ const ( ) // Values returns all known values for KeyType. Note that this can be expanded in -// the future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. +// the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (KeyType) Values() []KeyType { return []KeyType{ "HASH", @@ -511,6 +534,7 @@ const ( // Values returns all known values for PointInTimeRecoveryStatus. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (PointInTimeRecoveryStatus) Values() []PointInTimeRecoveryStatus { return []PointInTimeRecoveryStatus{ @@ -529,8 +553,9 @@ const ( ) // Values returns all known values for ProjectionType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ProjectionType) Values() []ProjectionType { return []ProjectionType{ "ALL", @@ -553,8 +578,9 @@ const ( ) // Values returns all known values for ReplicaStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ReplicaStatus) Values() []ReplicaStatus { return []ReplicaStatus{ "CREATING", @@ -577,8 +603,9 @@ const ( ) // Values returns all known values for ReturnConsumedCapacity. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ReturnConsumedCapacity) Values() []ReturnConsumedCapacity { return []ReturnConsumedCapacity{ "INDEXES", @@ -597,6 +624,7 @@ const ( // Values returns all known values for ReturnItemCollectionMetrics. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (ReturnItemCollectionMetrics) Values() []ReturnItemCollectionMetrics { return []ReturnItemCollectionMetrics{ @@ -617,8 +645,9 @@ const ( ) // Values returns all known values for ReturnValue. 
Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ReturnValue) Values() []ReturnValue { return []ReturnValue{ "NONE", @@ -639,8 +668,9 @@ const ( // Values returns all known values for ReturnValuesOnConditionCheckFailure. Note // that this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ReturnValuesOnConditionCheckFailure) Values() []ReturnValuesOnConditionCheckFailure { return []ReturnValuesOnConditionCheckFailure{ "ALL_OLD", @@ -657,8 +687,9 @@ const ( ) // Values returns all known values for S3SseAlgorithm. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (S3SseAlgorithm) Values() []S3SseAlgorithm { return []S3SseAlgorithm{ "AES256", @@ -676,8 +707,9 @@ const ( ) // Values returns all known values for ScalarAttributeType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ScalarAttributeType) Values() []ScalarAttributeType { return []ScalarAttributeType{ "S", @@ -697,8 +729,9 @@ const ( ) // Values returns all known values for Select. Note that this can be expanded in -// the future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. +// the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (Select) Values() []Select { return []Select{ "ALL_ATTRIBUTES", @@ -720,8 +753,9 @@ const ( ) // Values returns all known values for SSEStatus. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (SSEStatus) Values() []SSEStatus { return []SSEStatus{ "ENABLING", @@ -741,8 +775,9 @@ const ( ) // Values returns all known values for SSEType. Note that this can be expanded in -// the future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. +// the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (SSEType) Values() []SSEType { return []SSEType{ "AES256", @@ -761,8 +796,9 @@ const ( ) // Values returns all known values for StreamViewType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. 
The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (StreamViewType) Values() []StreamViewType { return []StreamViewType{ "NEW_IMAGE", @@ -781,8 +817,9 @@ const ( ) // Values returns all known values for TableClass. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (TableClass) Values() []TableClass { return []TableClass{ "STANDARD", @@ -804,8 +841,9 @@ const ( ) // Values returns all known values for TableStatus. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (TableStatus) Values() []TableStatus { return []TableStatus{ "CREATING", @@ -829,8 +867,9 @@ const ( ) // Values returns all known values for TimeToLiveStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (TimeToLiveStatus) Values() []TimeToLiveStatus { return []TimeToLiveStatus{ "ENABLING", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/errors.go index d11b3b8b687..97bf7dbd502 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/errors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/errors.go @@ -116,7 +116,8 @@ func (e *ContinuousBackupsUnavailableException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// There was an attempt to insert an item with the same primary key as an item +// There was an attempt to insert an item with the same primary key as an item +// // that already exists in the DynamoDB table. type DuplicateItemException struct { Message *string @@ -276,7 +277,8 @@ func (e *IdempotentParameterMismatchException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// There was a conflict when importing from the specified S3 source. This can +// There was a conflict when importing from the specified S3 source. This can +// // occur when the current import conflicts with a previous import request that had // the same client token. type ImportConflictException struct { @@ -490,17 +492,26 @@ func (e *ItemCollectionSizeLimitExceededException) ErrorFault() smithy.ErrorFaul } // There is no limit to the number of daily on-demand backups that can be taken. +// // For most purposes, up to 500 simultaneous table operations are allowed per // account. These operations include CreateTable , UpdateTable , DeleteTable , -// UpdateTimeToLive , RestoreTableFromBackup , and RestoreTableToPointInTime . When -// you are creating a table with one or more secondary indexes, you can have up to -// 250 such requests running at a time. 
However, if the table or index +// UpdateTimeToLive , RestoreTableFromBackup , and RestoreTableToPointInTime . +// +// When you are creating a table with one or more secondary indexes, you can have +// up to 250 such requests running at a time. However, if the table or index // specifications are complex, then DynamoDB might temporarily reduce the number of -// concurrent operations. When importing into DynamoDB, up to 50 simultaneous -// import table operations are allowed per account. There is a soft account quota -// of 2,500 tables. GetRecords was called with a value of more than 1000 for the -// limit request parameter. More than 2 processes are reading from the same streams -// shard at the same time. Exceeding this limit may result in request throttling. +// concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations are +// allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// GetRecords was called with a value of more than 1000 for the limit request +// parameter. +// +// More than 2 processes are reading from the same streams shard at the same time. +// Exceeding this limit may result in request throttling. type LimitExceededException struct { Message *string @@ -554,9 +565,10 @@ func (e *PointInTimeRecoveryUnavailableException) ErrorFault() smithy.ErrorFault return smithy.FaultClient } -// The operation tried to access a nonexistent resource-based policy. If you -// specified an ExpectedRevisionId , it's possible that a policy is present for the -// resource but its revision ID didn't match the expected value. +// The operation tried to access a nonexistent resource-based policy. +// +// If you specified an ExpectedRevisionId , it's possible that a policy is present +// for the resource but its revision ID didn't match the expected value. type PolicyNotFoundException struct { Message *string @@ -586,8 +598,9 @@ func (e *PolicyNotFoundException) ErrorFault() smithy.ErrorFault { return smithy // automatically retry requests that receive this exception. Your request is // eventually successful, unless your retry queue is too large to finish. Reduce // the frequency of requests and use exponential backoff. For more information, go -// to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) -// in the Amazon DynamoDB Developer Guide. +// to [Error Retries and Exponential Backoff]in the Amazon DynamoDB Developer Guide. +// +// [Error Retries and Exponential Backoff]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff type ProvisionedThroughputExceededException struct { Message *string @@ -668,8 +681,9 @@ func (e *ReplicaNotFoundException) ErrorCode() string { func (e *ReplicaNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // Throughput exceeds the current throughput quota for your account. Please -// contact Amazon Web Services Support (https://aws.amazon.com/support) to request -// a quota increase. +// contact [Amazon Web Services Support]to request a quota increase. 
+// +// [Amazon Web Services Support]: https://aws.amazon.com/support type RequestLimitExceeded struct { Message *string @@ -830,92 +844,150 @@ func (e *TableNotFoundException) ErrorCode() string { } func (e *TableNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// The entire transaction request was canceled. DynamoDB cancels a -// TransactWriteItems request under the following circumstances: +// The entire transaction request was canceled. +// +// DynamoDB cancels a TransactWriteItems request under the following circumstances: +// // - A condition in one of the condition expressions is not met. +// // - A table in the TransactWriteItems request is in a different account or // region. +// // - More than one action in the TransactWriteItems operation targets the same // item. +// // - There is insufficient provisioned capacity for the transaction to be // completed. +// // - An item size becomes too large (larger than 400 KB), or a local secondary // index (LSI) becomes too large, or a similar validation error occurs because of // changes made by the transaction. +// // - There is a user error, such as an invalid data format. +// // - There is an ongoing TransactWriteItems operation that conflicts with a // concurrent TransactWriteItems request. In this case the TransactWriteItems // operation fails with a TransactionCanceledException . // // DynamoDB cancels a TransactGetItems request under the following circumstances: +// // - There is an ongoing TransactGetItems operation that conflicts with a // concurrent PutItem , UpdateItem , DeleteItem or TransactWriteItems request. In // this case the TransactGetItems operation fails with a // TransactionCanceledException . +// // - A table in the TransactGetItems request is in a different account or region. +// // - There is insufficient provisioned capacity for the transaction to be // completed. +// // - There is a user error, such as an invalid data format. // // If using Java, DynamoDB lists the cancellation reasons on the // CancellationReasons property. This property is not set for other languages. // Transaction cancellation reasons are ordered in the order of requested items, if -// an item has no error it will have None code and Null message. Cancellation -// reason codes and possible error messages: +// an item has no error it will have None code and Null message. +// +// Cancellation reason codes and possible error messages: +// // - No Errors: +// // - Code: None +// // - Message: null +// // - Conditional Check Failed: +// // - Code: ConditionalCheckFailed +// // - Message: The conditional request failed. +// // - Item Collection Size Limit Exceeded: +// // - Code: ItemCollectionSizeLimitExceeded +// // - Message: Collection size exceeded. +// // - Transaction Conflict: +// // - Code: TransactionConflict +// // - Message: Transaction is ongoing for the item. +// // - Provisioned Throughput Exceeded: +// // - Code: ProvisionedThroughputExceeded +// // - Messages: +// // - The level of configured provisioned throughput for the table was exceeded. -// Consider increasing your provisioning level with the UpdateTable API. This -// Message is received when provisioned throughput is exceeded is on a provisioned -// DynamoDB table. -// - The level of configured provisioned throughput for one or more global -// secondary indexes of the table was exceeded. Consider increasing your -// provisioning level for the under-provisioned global secondary indexes with the -// UpdateTable API. 
This message is returned when provisioned throughput is -// exceeded is on a provisioned GSI. -// - Throttling Error: -// - Code: ThrottlingError -// - Messages: -// - Throughput exceeds the current capacity of your table or index. DynamoDB is -// automatically scaling your table or index so please try again shortly. If -// exceptions persist, check if you have a hot key: -// https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html. -// This message is returned when writes get throttled on an On-Demand table as -// DynamoDB is automatically scaling the table. -// - Throughput exceeds the current capacity for one or more global secondary -// indexes. DynamoDB is automatically scaling your index so please try again -// shortly. This message is returned when writes get throttled on an On-Demand GSI -// as DynamoDB is automatically scaling the GSI. -// - Validation Error: -// - Code: ValidationError -// - Messages: -// - One or more parameter values were invalid. -// - The update expression attempted to update the secondary index key beyond -// allowed size limits. -// - The update expression attempted to update the secondary index key to -// unsupported type. -// - An operand in the update expression has an incorrect data type. -// - Item size to update has exceeded the maximum allowed size. -// - Number overflow. Attempting to store a number with magnitude larger than -// supported range. -// - Type mismatch for attribute to update. -// - Nesting Levels have exceeded supported limits. -// - The document path provided in the update expression is invalid for update. -// - The provided expression refers to an attribute that does not exist in the -// item. +// Consider increasing your provisioning level with the UpdateTable API. +// +// This Message is received when provisioned throughput is exceeded is on a +// +// provisioned DynamoDB table. +// +// - The level of configured provisioned throughput for one or more global +// secondary indexes of the table was exceeded. Consider increasing your +// provisioning level for the under-provisioned global secondary indexes with the +// UpdateTable API. +// +// This message is returned when provisioned throughput is exceeded is on a +// +// provisioned GSI. +// +// - Throttling Error: +// +// - Code: ThrottlingError +// +// - Messages: +// +// - Throughput exceeds the current capacity of your table or index. DynamoDB is +// automatically scaling your table or index so please try again shortly. If +// exceptions persist, check if you have a hot key: +// https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html. +// +// This message is returned when writes get throttled on an On-Demand table as +// +// DynamoDB is automatically scaling the table. +// +// - Throughput exceeds the current capacity for one or more global secondary +// indexes. DynamoDB is automatically scaling your index so please try again +// shortly. +// +// This message is returned when writes get throttled on an On-Demand GSI as +// +// DynamoDB is automatically scaling the GSI. +// +// - Validation Error: +// +// - Code: ValidationError +// +// - Messages: +// +// - One or more parameter values were invalid. +// +// - The update expression attempted to update the secondary index key beyond +// allowed size limits. +// +// - The update expression attempted to update the secondary index key to +// unsupported type. +// +// - An operand in the update expression has an incorrect data type. 
+// +// - Item size to update has exceeded the maximum allowed size. +// +// - Number overflow. Attempting to store a number with magnitude larger than +// supported range. +// +// - Type mismatch for attribute to update. +// +// - Nesting Levels have exceeded supported limits. +// +// - The document path provided in the update expression is invalid for update. +// +// - The provided expression refers to an attribute that does not exist in the +// item. type TransactionCanceledException struct { Message *string @@ -970,31 +1042,48 @@ func (e *TransactionConflictException) ErrorCode() string { func (e *TransactionConflictException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // The transaction with the given request token is already in progress. -// Recommended Settings This is a general recommendation for handling the -// TransactionInProgressException . These settings help ensure that the client -// retries will trigger completion of the ongoing TransactWriteItems request. +// +// # Recommended Settings +// +// This is a general recommendation for handling the TransactionInProgressException +// . These settings help ensure that the client retries will trigger completion of +// the ongoing TransactWriteItems request. +// // - Set clientExecutionTimeout to a value that allows at least one retry to be // processed after 5 seconds have elapsed since the first attempt for the // TransactWriteItems operation. +// // - Set socketTimeout to a value a little lower than the requestTimeout setting. +// // - requestTimeout should be set based on the time taken for the individual // retries of a single HTTP request for your use case, but setting it to 1 second // or higher should work well to reduce chances of retries and // TransactionInProgressException errors. +// // - Use exponential backoff when retrying and tune backoff if needed. // -// Assuming default retry policy (https://github.com/aws/aws-sdk-java/blob/fd409dee8ae23fb8953e0bb4dbde65536a7e0514/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L97) -// , example timeout settings based on the guidelines above are as follows: Example -// timeline: +// Assuming [default retry policy], example timeout settings based on the guidelines above are as +// follows: +// +// Example timeline: +// // - 0-1000 first attempt +// // - 1000-1500 first sleep/delay (default retry policy uses 500 ms as base delay // for 4xx errors) +// // - 1500-2500 second attempt +// // - 2500-3500 second sleep/delay (500 * 2, exponential backoff) +// // - 3500-4500 third attempt +// // - 4500-6500 third sleep/delay (500 * 2^2) +// // - 6500-7500 fourth attempt (this can trigger inline recovery since 5 seconds // have elapsed since the first attempt reached TC) +// +// [default retry policy]: https://github.com/aws/aws-sdk-java/blob/fd409dee8ae23fb8953e0bb4dbde65536a7e0514/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L97 type TransactionInProgressException struct { Message *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/types.go index 437acc5dae9..31f30fcee3c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/types.go @@ -20,6 +20,7 @@ type ArchivalSummary struct { ArchivalDateTime *time.Time // The reason DynamoDB archived the table. 
Currently, the only possible value is: + // // - INACCESSIBLE_ENCRYPTION_CREDENTIALS - The table was archived due to the // table's KMS key being inaccessible for more than seven days. An On-Demand backup // was created at the archival time. @@ -37,8 +38,11 @@ type AttributeDefinition struct { AttributeName *string // The data type for the attribute, where: + // // - S - the attribute is of type String + // // - N - the attribute is of type Number + // // - B - the attribute is of type Binary // // This member is required. @@ -47,10 +51,12 @@ type AttributeDefinition struct { noSmithyDocumentSerde } -// Represents the data for an attribute. Each attribute value is described as a -// name-value pair. The name is the data type, and the value is the data itself. -// For more information, see Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes) -// in the Amazon DynamoDB Developer Guide. +// Represents the data for an attribute. +// +// Each attribute value is described as a name-value pair. The name is the data +// type, and the value is the data itself. +// +// For more information, see [Data Types] in the Amazon DynamoDB Developer Guide. // // The following types satisfy this interface: // @@ -64,12 +70,15 @@ type AttributeDefinition struct { // AttributeValueMemberNULL // AttributeValueMemberS // AttributeValueMemberSS +// +// [Data Types]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes type AttributeValue interface { isAttributeValue() } -// An attribute of type Binary. For example: "B": -// "dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk" +// An attribute of type Binary. For example: +// +// "B": "dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk" type AttributeValueMemberB struct { Value []byte @@ -78,7 +87,9 @@ type AttributeValueMemberB struct { func (*AttributeValueMemberB) isAttributeValue() {} -// An attribute of type Boolean. For example: "BOOL": true +// An attribute of type Boolean. For example: +// +// "BOOL": true type AttributeValueMemberBOOL struct { Value bool @@ -87,8 +98,9 @@ type AttributeValueMemberBOOL struct { func (*AttributeValueMemberBOOL) isAttributeValue() {} -// An attribute of type Binary Set. For example: "BS": ["U3Vubnk=", "UmFpbnk=", -// "U25vd3k="] +// An attribute of type Binary Set. For example: +// +// "BS": ["U3Vubnk=", "UmFpbnk=", "U25vd3k="] type AttributeValueMemberBS struct { Value [][]byte @@ -97,8 +109,9 @@ type AttributeValueMemberBS struct { func (*AttributeValueMemberBS) isAttributeValue() {} -// An attribute of type List. For example: "L": [ {"S": "Cookies"} , {"S": -// "Coffee"}, {"N": "3.14159"}] +// An attribute of type List. For example: +// +// "L": [ {"S": "Cookies"} , {"S": "Coffee"}, {"N": "3.14159"}] type AttributeValueMemberL struct { Value []AttributeValue @@ -107,8 +120,9 @@ type AttributeValueMemberL struct { func (*AttributeValueMemberL) isAttributeValue() {} -// An attribute of type Map. For example: "M": {"Name": {"S": "Joe"}, "Age": {"N": -// "35"}} +// An attribute of type Map. For example: +// +// "M": {"Name": {"S": "Joe"}, "Age": {"N": "35"}} type AttributeValueMemberM struct { Value map[string]AttributeValue @@ -117,10 +131,13 @@ type AttributeValueMemberM struct { func (*AttributeValueMemberM) isAttributeValue() {} -// An attribute of type Number. 
For example: "N": "123.45" Numbers are sent across -// the network to DynamoDB as strings, to maximize compatibility across languages -// and libraries. However, DynamoDB treats them as number type attributes for -// mathematical operations. +// An attribute of type Number. For example: +// +// "N": "123.45" +// +// Numbers are sent across the network to DynamoDB as strings, to maximize +// compatibility across languages and libraries. However, DynamoDB treats them as +// number type attributes for mathematical operations. type AttributeValueMemberN struct { Value string @@ -129,8 +146,11 @@ type AttributeValueMemberN struct { func (*AttributeValueMemberN) isAttributeValue() {} -// An attribute of type Number Set. For example: "NS": ["42.2", "-19", "7.5", -// "3.14"] Numbers are sent across the network to DynamoDB as strings, to maximize +// An attribute of type Number Set. For example: +// +// "NS": ["42.2", "-19", "7.5", "3.14"] +// +// Numbers are sent across the network to DynamoDB as strings, to maximize // compatibility across languages and libraries. However, DynamoDB treats them as // number type attributes for mathematical operations. type AttributeValueMemberNS struct { @@ -141,7 +161,9 @@ type AttributeValueMemberNS struct { func (*AttributeValueMemberNS) isAttributeValue() {} -// An attribute of type Null. For example: "NULL": true +// An attribute of type Null. For example: +// +// "NULL": true type AttributeValueMemberNULL struct { Value bool @@ -150,7 +172,9 @@ type AttributeValueMemberNULL struct { func (*AttributeValueMemberNULL) isAttributeValue() {} -// An attribute of type String. For example: "S": "Hello" +// An attribute of type String. For example: +// +// "S": "Hello" type AttributeValueMemberS struct { Value string @@ -159,8 +183,9 @@ type AttributeValueMemberS struct { func (*AttributeValueMemberS) isAttributeValue() {} -// An attribute of type String Set. For example: "SS": ["Giraffe", "Hippo" -// ,"Zebra"] +// An attribute of type String Set. For example: +// +// "SS": ["Giraffe", "Hippo" ,"Zebra"] type AttributeValueMemberSS struct { Value []string @@ -170,64 +195,89 @@ type AttributeValueMemberSS struct { func (*AttributeValueMemberSS) isAttributeValue() {} // For the UpdateItem operation, represents the attributes to be modified, the -// action to perform on each, and the new value for each. You cannot use UpdateItem -// to update any primary key attributes. Instead, you will need to delete the item, -// and then use PutItem to create a new item with new attributes. Attribute values -// cannot be null; string and binary type attributes must have lengths greater than -// zero; and set type attributes must not be empty. Requests with empty values will -// be rejected with a ValidationException exception. +// action to perform on each, and the new value for each. +// +// You cannot use UpdateItem to update any primary key attributes. Instead, you +// will need to delete the item, and then use PutItem to create a new item with +// new attributes. +// +// Attribute values cannot be null; string and binary type attributes must have +// lengths greater than zero; and set type attributes must not be empty. Requests +// with empty values will be rejected with a ValidationException exception. type AttributeValueUpdate struct { // Specifies how to perform the update. Valid values are PUT (default), DELETE , // and ADD . The behavior depends on whether the specified primary key already - // exists in the table. 
If an item with the specified Key is found in the table: + // exists in the table. + // + // If an item with the specified Key is found in the table: + // // - PUT - Adds the specified attribute to the item. If the attribute already // exists, it is replaced by the new value. + // // - DELETE - If no value is specified, the attribute and its value are removed // from the item. The data type of the specified value must match the existing - // value's data type. If a set of values is specified, then those values are - // subtracted from the old set. For example, if the attribute value was the set - // [a,b,c] and the DELETE action specified [a,c] , then the final attribute value - // would be [b] . Specifying an empty set is an error. + // value's data type. + // + // If a set of values is specified, then those values are subtracted from the old + // set. For example, if the attribute value was the set [a,b,c] and the DELETE + // action specified [a,c] , then the final attribute value would be [b] . + // Specifying an empty set is an error. + // // - ADD - If the attribute does not already exist, then the attribute and its // values are added to the item. If the attribute does exist, then the behavior of // ADD depends on the data type of the attribute: + // // - If the existing attribute is a number, and if Value is also a number, then // the Value is mathematically added to the existing attribute. If Value is a - // negative number, then it is subtracted from the existing attribute. If you use - // ADD to increment or decrement a number value for an item that doesn't exist - // before the update, DynamoDB uses 0 as the initial value. In addition, if you use - // ADD to update an existing item, and intend to increment or decrement an - // attribute value which does not yet exist, DynamoDB uses 0 as the initial - // value. For example, suppose that the item you want to update does not yet have - // an attribute named itemcount, but you decide to ADD the number 3 to this - // attribute anyway, even though it currently does not exist. DynamoDB will create - // the itemcount attribute, set its initial value to 0 , and finally add 3 to it. - // The result will be a new itemcount attribute in the item, with a value of 3 . + // negative number, then it is subtracted from the existing attribute. + // + // If you use ADD to increment or decrement a number value for an item that doesn't + // exist before the update, DynamoDB uses 0 as the initial value. + // + // In addition, if you use ADD to update an existing item, and intend to increment + // or decrement an attribute value which does not yet exist, DynamoDB uses 0 as + // the initial value. For example, suppose that the item you want to update does + // not yet have an attribute named itemcount, but you decide to ADD the number 3 + // to this attribute anyway, even though it currently does not exist. DynamoDB will + // create the itemcount attribute, set its initial value to 0 , and finally add 3 + // to it. The result will be a new itemcount attribute in the item, with a value of + // 3 . + // // - If the existing data type is a set, and if the Value is also a set, then the // Value is added to the existing set. (This is a set operation, not mathematical // addition.) For example, if the attribute value was the set [1,2] , and the ADD // action specified [3] , then the final attribute value would be [1,2,3] . 
An // error occurs if an Add action is specified for a set attribute and the attribute - // type specified does not match the existing set type. Both sets must have the - // same primitive data type. For example, if the existing data type is a set of - // strings, the Value must also be a set of strings. The same holds true for - // number sets and binary sets. This action is only valid for an existing - // attribute whose data type is number or is a set. Do not use ADD for any other - // data types. + // type specified does not match the existing set type. + // + // Both sets must have the same primitive data type. For example, if the existing + // data type is a set of strings, the Value must also be a set of strings. The + // same holds true for number sets and binary sets. + // + // This action is only valid for an existing attribute whose data type is number + // or is a set. Do not use ADD for any other data types. + // // If no item with the specified Key is found: + // // - PUT - DynamoDB creates a new item with the specified primary key, and then // adds the attribute. + // // - DELETE - Nothing happens; there is no attribute to delete. + // // - ADD - DynamoDB creates a new item with the supplied primary key and number // (or set) for the attribute value. The only data types allowed are number, number // set, string set or binary set. Action AttributeAction - // Represents the data for an attribute. Each attribute value is described as a - // name-value pair. The name is the data type, and the value is the data itself. - // For more information, see Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes) - // in the Amazon DynamoDB Developer Guide. + // Represents the data for an attribute. + // + // Each attribute value is described as a name-value pair. The name is the data + // type, and the value is the data itself. + // + // For more information, see [Data Types] in the Amazon DynamoDB Developer Guide. + // + // [Data Types]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes Value AttributeValue noSmithyDocumentSerde @@ -417,11 +467,14 @@ type BackupDetails struct { BackupStatus BackupStatus // BackupType: + // // - USER - You create and manage these using the on-demand backup feature. + // // - SYSTEM - If you delete a table with point-in-time recovery enabled, a SYSTEM // backup is automatically created and is retained for 35 days (at no additional // cost). System backups allow you to restore the deleted table to the state it was // in just before the point of deletion. + // // - AWS_BACKUP - On-demand backup created by you from Backup service. // // This member is required. @@ -461,11 +514,14 @@ type BackupSummary struct { BackupStatus BackupStatus // BackupType: + // // - USER - You create and manage these using the on-demand backup feature. + // // - SYSTEM - If you delete a table with point-in-time recovery enabled, a SYSTEM // backup is automatically created and is retained for 35 days (at no additional // cost). System backups allow you to restore the deleted table to the state it was // in just before the point of deletion. + // // - AWS_BACKUP - On-demand backup created by you from Backup service. BackupType BackupType @@ -484,14 +540,14 @@ type BackupSummary struct { // An error associated with a statement in a PartiQL batch that was run. 
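The ADD behaviour described above (incrementing a number attribute that may not exist yet, starting from 0) looks roughly like this with the legacy AttributeUpdates parameter. The table, key, and itemcount attribute names are assumptions for the sketch; new code would typically use an UpdateExpression such as "ADD itemcount :n" instead.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := dynamodb.NewFromConfig(cfg)

	// ADD on a number attribute that does not exist yet: DynamoDB starts from 0,
	// so after this call the item has itemcount = 3.
	_, err = client.UpdateItem(context.TODO(), &dynamodb.UpdateItemInput{
		TableName: aws.String("Threads"),
		Key: map[string]types.AttributeValue{
			"Id": &types.AttributeValueMemberS{Value: "thread-42"},
		},
		AttributeUpdates: map[string]types.AttributeValueUpdate{
			"itemcount": {
				Action: types.AttributeActionAdd,
				Value:  &types.AttributeValueMemberN{Value: "3"},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```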
type BatchStatementError struct { - // The error code associated with the failed PartiQL batch statement. + // The error code associated with the failed PartiQL batch statement. Code BatchStatementErrorCodeEnum // The item which caused the condition check to fail. This will be set if // ReturnValuesOnConditionCheckFailure is specified as ALL_OLD . Item map[string]AttributeValue - // The error message associated with the PartiQL batch response. + // The error message associated with the PartiQL batch response. Message *string noSmithyDocumentSerde @@ -500,22 +556,23 @@ type BatchStatementError struct { // A PartiQL batch statement request. type BatchStatementRequest struct { - // A valid PartiQL statement. + // A valid PartiQL statement. // // This member is required. Statement *string - // The read consistency of the PartiQL batch request. + // The read consistency of the PartiQL batch request. ConsistentRead *bool - // The parameters associated with a PartiQL statement in the batch request. + // The parameters associated with a PartiQL statement in the batch request. Parameters []AttributeValue // An optional parameter that returns the item attributes for a PartiQL batch - // request operation that failed a condition check. There is no additional cost - // associated with requesting a return value aside from the small network and - // processing overhead of receiving a larger response. No read capacity units are - // consumed. + // request operation that failed a condition check. + // + // There is no additional cost associated with requesting a return value aside + // from the small network and processing overhead of receiving a larger response. + // No read capacity units are consumed. ReturnValuesOnConditionCheckFailure ReturnValuesOnConditionCheckFailure noSmithyDocumentSerde @@ -524,13 +581,13 @@ type BatchStatementRequest struct { // A PartiQL batch statement response.. type BatchStatementResponse struct { - // The error associated with a failed PartiQL batch statement. + // The error associated with a failed PartiQL batch statement. Error *BatchStatementError - // A DynamoDB item associated with a BatchStatementResponse + // A DynamoDB item associated with a BatchStatementResponse Item map[string]AttributeValue - // The table name associated with a failed PartiQL batch statement. + // The table name associated with a failed PartiQL batch statement. TableName *string noSmithyDocumentSerde @@ -538,15 +595,20 @@ type BatchStatementResponse struct { // Contains the details for the read/write capacity mode. This page talks about // PROVISIONED and PAY_PER_REQUEST billing modes. For more information about these -// modes, see Read/write capacity mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html) -// . You may need to switch to on-demand mode at least once in order to return a +// modes, see [Read/write capacity mode]. +// +// You may need to switch to on-demand mode at least once in order to return a // BillingModeSummary response. +// +// [Read/write capacity mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html type BillingModeSummary struct { // Controls how you are charged for read and write throughput and how you manage // capacity. This setting can be changed later. + // // - PROVISIONED - Sets the read/write capacity mode to PROVISIONED . We // recommend using PROVISIONED for predictable workloads. 
+ // // - PAY_PER_REQUEST - Sets the read/write capacity mode to PAY_PER_REQUEST . We // recommend using PAY_PER_REQUEST for unpredictable workloads. BillingMode BillingMode @@ -594,119 +656,168 @@ type Capacity struct { } // Represents the selection criteria for a Query or Scan operation: +// // - For a Query operation, Condition is used for specifying the KeyConditions to // use when querying a table or an index. For KeyConditions , only the following -// comparison operators are supported: EQ | LE | LT | GE | GT | BEGINS_WITH | -// BETWEEN Condition is also used in a QueryFilter , which evaluates the query -// results and returns only the desired values. -// - For a Scan operation, Condition is used in a ScanFilter , which evaluates -// the scan results and returns only the desired values. +// comparison operators are supported: +// +// EQ | LE | LT | GE | GT | BEGINS_WITH | BETWEEN +// +// Condition is also used in a QueryFilter , which evaluates the query results and +// +// returns only the desired values. +// +// - For a Scan operation, Condition is used in a ScanFilter , which evaluates +// the scan results and returns only the desired values. type Condition struct { // A comparator for evaluating attributes. For example, equals, greater than, less - // than, etc. The following comparison operators are available: EQ | NE | LE | LT - // | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | - // BETWEEN The following are descriptions of each comparison operator. + // than, etc. + // + // The following comparison operators are available: + // + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | + // BEGINS_WITH | IN | BETWEEN + // + // The following are descriptions of each comparison operator. + // // - EQ : Equal. EQ is supported for all data types, including lists and maps. - // AttributeValueList can contain only one AttributeValue element of type String, + // + // AttributeValueList can contain only one AttributeValue element of type String, // Number, Binary, String Set, Number Set, or Binary Set. If an item contains an // AttributeValue element of a different type than the one provided in the // request, the value does not match. For example, {"S":"6"} does not equal // {"N":"6"} . Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]} . - // - NE : Not equal. NE is supported for all data types, including lists and - // maps. AttributeValueList can contain only one AttributeValue of type String, - // Number, Binary, String Set, Number Set, or Binary Set. If an item contains an + // + // - NE : Not equal. NE is supported for all data types, including lists and maps. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // Binary, String Set, Number Set, or Binary Set. If an item contains an // AttributeValue of a different type than the one provided in the request, the // value does not match. For example, {"S":"6"} does not equal {"N":"6"} . Also, // {"N":"6"} does not equal {"NS":["6", "2", "1"]} . - // - LE : Less than or equal. AttributeValueList can contain only one - // AttributeValue element of type String, Number, or Binary (not a set type). If - // an item contains an AttributeValue element of a different type than the one - // provided in the request, the value does not match. For example, {"S":"6"} does - // not equal {"N":"6"} . Also, {"N":"6"} does not compare to {"NS":["6", "2", - // "1"]} . - // - LT : Less than. 
AttributeValueList can contain only one AttributeValue of - // type String, Number, or Binary (not a set type). If an item contains an - // AttributeValue element of a different type than the one provided in the - // request, the value does not match. For example, {"S":"6"} does not equal - // {"N":"6"} . Also, {"N":"6"} does not compare to {"NS":["6", "2", "1"]} . - // - GE : Greater than or equal. AttributeValueList can contain only one - // AttributeValue element of type String, Number, or Binary (not a set type). If - // an item contains an AttributeValue element of a different type than the one - // provided in the request, the value does not match. For example, {"S":"6"} does - // not equal {"N":"6"} . Also, {"N":"6"} does not compare to {"NS":["6", "2", - // "1"]} . - // - GT : Greater than. AttributeValueList can contain only one AttributeValue - // element of type String, Number, or Binary (not a set type). If an item contains - // an AttributeValue element of a different type than the one provided in the - // request, the value does not match. For example, {"S":"6"} does not equal - // {"N":"6"} . Also, {"N":"6"} does not compare to {"NS":["6", "2", "1"]} . + // + // - LE : Less than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"} . Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]} . + // + // - LT : Less than. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // or Binary (not a set type). If an item contains an AttributeValue element of a + // different type than the one provided in the request, the value does not match. + // For example, {"S":"6"} does not equal {"N":"6"} . Also, {"N":"6"} does not + // compare to {"NS":["6", "2", "1"]} . + // + // - GE : Greater than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"} . Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]} . + // + // - GT : Greater than. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"} . Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]} . + // // - NOT_NULL : The attribute exists. NOT_NULL is supported for all data types, - // including lists and maps. This operator tests for the existence of an attribute, - // not its data type. If the data type of attribute " a " is null, and you - // evaluate it using NOT_NULL , the result is a Boolean true . This result is - // because the attribute " a " exists; its data type is not relevant to the - // NOT_NULL comparison operator. + // including lists and maps. + // + // This operator tests for the existence of an attribute, not its data type. If + // the data type of attribute " a " is null, and you evaluate it using NOT_NULL , + // the result is a Boolean true . 
This result is because the attribute " a " + // exists; its data type is not relevant to the NOT_NULL comparison operator. + // // - NULL : The attribute does not exist. NULL is supported for all data types, - // including lists and maps. This operator tests for the nonexistence of an - // attribute, not its data type. If the data type of attribute " a " is null, and - // you evaluate it using NULL , the result is a Boolean false . This is because - // the attribute " a " exists; its data type is not relevant to the NULL - // comparison operator. - // - CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList - // can contain only one AttributeValue element of type String, Number, or Binary - // (not a set type). If the target attribute of the comparison is of type String, - // then the operator checks for a substring match. If the target attribute of the - // comparison is of type Binary, then the operator looks for a subsequence of the - // target that matches the input. If the target attribute of the comparison is a - // set (" SS ", " NS ", or " BS "), then the operator evaluates to true if it - // finds an exact match with any member of the set. CONTAINS is supported for - // lists: When evaluating " a CONTAINS b ", " a " can be a list; however, " b " - // cannot be a set, a map, or a list. + // including lists and maps. + // + // This operator tests for the nonexistence of an attribute, not its data type. If + // the data type of attribute " a " is null, and you evaluate it using NULL , the + // result is a Boolean false . This is because the attribute " a " exists; its + // data type is not relevant to the NULL comparison operator. + // + // - CONTAINS : Checks for a subsequence, or value in a set. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If the target attribute of the comparison is + // of type String, then the operator checks for a substring match. If the target + // attribute of the comparison is of type Binary, then the operator looks for a + // subsequence of the target that matches the input. If the target attribute of the + // comparison is a set (" SS ", " NS ", or " BS "), then the operator evaluates + // to true if it finds an exact match with any member of the set. + // + // CONTAINS is supported for lists: When evaluating " a CONTAINS b ", " a " can be + // a list; however, " b " cannot be a set, a map, or a list. + // // - NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in - // a set. AttributeValueList can contain only one AttributeValue element of type - // String, Number, or Binary (not a set type). If the target attribute of the - // comparison is a String, then the operator checks for the absence of a substring - // match. If the target attribute of the comparison is Binary, then the operator - // checks for the absence of a subsequence of the target that matches the input. If - // the target attribute of the comparison is a set (" SS ", " NS ", or " BS "), - // then the operator evaluates to true if it does not find an exact match with any - // member of the set. NOT_CONTAINS is supported for lists: When evaluating " a - // NOT CONTAINS b ", " a " can be a list; however, " b " cannot be a set, a map, - // or a list. - // - BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only one - // AttributeValue of type String or Binary (not a Number or a set type). 
The - // target attribute of the comparison must be of type String or Binary (not a - // Number or a set type). - // - IN : Checks for matching elements in a list. AttributeValueList can contain - // one or more AttributeValue elements of type String, Number, or Binary. These - // attributes are compared against an existing attribute of an item. If any - // elements of the input are equal to the item attribute, the expression evaluates - // to true. + // a set. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If the target attribute of the comparison is + // a String, then the operator checks for the absence of a substring match. If the + // target attribute of the comparison is Binary, then the operator checks for the + // absence of a subsequence of the target that matches the input. If the target + // attribute of the comparison is a set (" SS ", " NS ", or " BS "), then the + // operator evaluates to true if it does not find an exact match with any member of + // the set. + // + // NOT_CONTAINS is supported for lists: When evaluating " a NOT CONTAINS b ", " a " + // can be a list; however, " b " cannot be a set, a map, or a list. + // + // - BEGINS_WITH : Checks for a prefix. + // + // AttributeValueList can contain only one AttributeValue of type String or Binary + // (not a Number or a set type). The target attribute of the comparison must be of + // type String or Binary (not a Number or a set type). + // + // - IN : Checks for matching elements in a list. + // + // AttributeValueList can contain one or more AttributeValue elements of type + // String, Number, or Binary. These attributes are compared against an existing + // attribute of an item. If any elements of the input are equal to the item + // attribute, the expression evaluates to true. + // // - BETWEEN : Greater than or equal to the first value, and less than or equal - // to the second value. AttributeValueList must contain two AttributeValue - // elements of the same type, either String, Number, or Binary (not a set type). A - // target attribute matches if the target value is greater than, or equal to, the - // first element and less than, or equal to, the second element. If an item - // contains an AttributeValue element of a different type than the one provided - // in the request, the value does not match. For example, {"S":"6"} does not - // compare to {"N":"6"} . Also, {"N":"6"} does not compare to {"NS":["6", "2", - // "1"]} - // For usage examples of AttributeValueList and ComparisonOperator , see Legacy - // Conditional Parameters (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html) - // in the Amazon DynamoDB Developer Guide. + // to the second value. + // + // AttributeValueList must contain two AttributeValue elements of the same type, + // either String, Number, or Binary (not a set type). A target attribute matches if + // the target value is greater than, or equal to, the first element and less than, + // or equal to, the second element. If an item contains an AttributeValue element + // of a different type than the one provided in the request, the value does not + // match. For example, {"S":"6"} does not compare to {"N":"6"} . Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]} + // + // For usage examples of AttributeValueList and ComparisonOperator , see [Legacy Conditional Parameters] in the + // Amazon DynamoDB Developer Guide. 
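A minimal sketch of the legacy KeyConditions form of Condition described above, using a couple of the permitted comparison operators; the Threads table and its ForumName/Subject keys are assumed names, and KeyConditionExpression is the usual choice in new code.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := dynamodb.NewFromConfig(cfg)

	// Partition key EQ plus a BEGINS_WITH condition on the sort key; KeyConditions
	// only accepts EQ, LE, LT, GE, GT, BEGINS_WITH, and BETWEEN.
	out, err := client.Query(context.TODO(), &dynamodb.QueryInput{
		TableName: aws.String("Threads"),
		KeyConditions: map[string]types.Condition{
			"ForumName": {
				ComparisonOperator: types.ComparisonOperatorEq,
				AttributeValueList: []types.AttributeValue{
					&types.AttributeValueMemberS{Value: "Amazon DynamoDB"},
				},
			},
			"Subject": {
				ComparisonOperator: types.ComparisonOperatorBeginsWith,
				AttributeValueList: []types.AttributeValue{
					&types.AttributeValueMemberS{Value: "How do I"},
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("matched %d items", len(out.Items))
}
```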
+ // + // [Legacy Conditional Parameters]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html // // This member is required. ComparisonOperator ComparisonOperator // One or more values to evaluate against the supplied attribute. The number of - // values in the list depends on the ComparisonOperator being used. For type - // Number, value comparisons are numeric. String value comparisons for greater - // than, equals, or less than are based on ASCII character code values. For - // example, a is greater than A , and a is greater than B . For a list of code - // values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters) - // . For Binary, DynamoDB treats each byte of the binary data as unsigned when it + // values in the list depends on the ComparisonOperator being used. + // + // For type Number, value comparisons are numeric. + // + // String value comparisons for greater than, equals, or less than are based on + // ASCII character code values. For example, a is greater than A , and a is + // greater than B . For a list of code values, see [http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters]. + // + // For Binary, DynamoDB treats each byte of the binary data as unsigned when it // compares binary values. + // + // [http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters]: http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters AttributeValueList []AttributeValue noSmithyDocumentSerde @@ -717,8 +828,9 @@ type Condition struct { type ConditionCheck struct { // A condition that must be satisfied in order for a conditional update to - // succeed. For more information, see Condition expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ConditionExpressions.html) - // in the Amazon DynamoDB Developer Guide. + // succeed. For more information, see [Condition expressions]in the Amazon DynamoDB Developer Guide. + // + // [Condition expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ConditionExpressions.html // // This member is required. ConditionExpression *string @@ -736,13 +848,15 @@ type ConditionCheck struct { TableName *string // One or more substitution tokens for attribute names in an expression. For more - // information, see Expression attribute names (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ExpressionAttributeNames.html) - // in the Amazon DynamoDB Developer Guide. + // information, see [Expression attribute names]in the Amazon DynamoDB Developer Guide. + // + // [Expression attribute names]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ExpressionAttributeNames.html ExpressionAttributeNames map[string]string // One or more values that can be substituted in an expression. For more - // information, see Condition expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ConditionExpressions.html) - // in the Amazon DynamoDB Developer Guide. + // information, see [Condition expressions]in the Amazon DynamoDB Developer Guide. 
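A ConditionCheck is only usable inside TransactWriteItems; a minimal sketch, with hypothetical table and attribute names, pairs a check on one item with a Put on another so the whole transaction is rejected if the condition fails.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := dynamodb.NewFromConfig(cfg)

	_, err = client.TransactWriteItems(context.TODO(), &dynamodb.TransactWriteItemsInput{
		TransactItems: []types.TransactWriteItem{
			{
				// The Put below only happens if the account still has budget left.
				ConditionCheck: &types.ConditionCheck{
					TableName:                aws.String("Accounts"),
					Key:                      map[string]types.AttributeValue{"Id": &types.AttributeValueMemberS{Value: "acct-1"}},
					ConditionExpression:      aws.String("#b >= :cost"),
					ExpressionAttributeNames: map[string]string{"#b": "Budget"},
					ExpressionAttributeValues: map[string]types.AttributeValue{
						":cost": &types.AttributeValueMemberN{Value: "10"},
					},
				},
			},
			{
				Put: &types.Put{
					TableName: aws.String("Orders"),
					Item: map[string]types.AttributeValue{
						"Id":   &types.AttributeValueMemberS{Value: "order-7"},
						"Cost": &types.AttributeValueMemberN{Value: "10"},
					},
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```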
+ // + // [Condition expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ConditionExpressions.html ExpressionAttributeValues map[string]AttributeValue // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the @@ -756,8 +870,10 @@ type ConditionCheck struct { // The capacity units consumed by an operation. The data returned includes the // total provisioned throughput consumed, along with statistics for the table and // any indexes involved in the operation. ConsumedCapacity is only returned if the -// request asked for it. For more information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) -// in the Amazon DynamoDB Developer Guide. +// request asked for it. For more information, see [Provisioned capacity mode]in the Amazon DynamoDB +// Developer Guide. +// +// [Provisioned capacity mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html type ConsumedCapacity struct { // The total number of capacity units consumed by the operation. @@ -838,10 +954,18 @@ type CreateGlobalSecondaryIndexAction struct { // This member is required. Projection *Projection + // The maximum number of read and write units for the global secondary index being + // created. If you use this parameter, you must specify MaxReadRequestUnits , + // MaxWriteRequestUnits , or both. + OnDemandThroughput *OnDemandThroughput + // Represents the provisioned throughput settings for the specified global - // secondary index. For current minimum and maximum provisioned throughput values, - // see Service, Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) - // in the Amazon DynamoDB Developer Guide. + // secondary index. + // + // For current minimum and maximum provisioned throughput values, see [Service, Account, and Table Quotas] in the + // Amazon DynamoDB Developer Guide. + // + // [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html ProvisionedThroughput *ProvisionedThroughput noSmithyDocumentSerde @@ -875,6 +999,11 @@ type CreateReplicationGroupMemberAction struct { // from the default DynamoDB KMS key alias/aws/dynamodb . KMSMasterKeyId *string + // The maximum on-demand throughput settings for the specified replica table being + // created. You can only modify MaxReadRequestUnits , because you can't modify + // MaxWriteRequestUnits for individual replica tables. + OnDemandThroughputOverride *OnDemandThroughputOverride + // Replica-specific provisioned throughput. If not specified, uses the source // table's provisioned throughput settings. ProvisionedThroughputOverride *ProvisionedThroughputOverride @@ -889,10 +1018,10 @@ type CreateReplicationGroupMemberAction struct { // Processing options for the CSV file being imported. type CsvOptions struct { - // The delimiter used for separating items in the CSV file being imported. + // The delimiter used for separating items in the CSV file being imported. Delimiter *string - // List of the headers used to specify a common header for all source CSV files + // List of the headers used to specify a common header for all source CSV files // being imported. If this field is specified then the first line of each CSV file // is treated as data instead of the header. If this field is not specified the the // first line of each CSV file is treated as the header. 
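The OnDemandThroughput field added to CreateGlobalSecondaryIndexAction in this hunk slots in as shown below when adding an index to an on-demand table. The table, index, and attribute names are placeholders, and the request-unit caps are arbitrary example values.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := dynamodb.NewFromConfig(cfg)

	_, err = client.UpdateTable(context.TODO(), &dynamodb.UpdateTableInput{
		TableName: aws.String("Threads"),
		AttributeDefinitions: []types.AttributeDefinition{
			{AttributeName: aws.String("Author"), AttributeType: types.ScalarAttributeTypeS},
		},
		GlobalSecondaryIndexUpdates: []types.GlobalSecondaryIndexUpdate{
			{
				Create: &types.CreateGlobalSecondaryIndexAction{
					IndexName: aws.String("ByAuthor"),
					KeySchema: []types.KeySchemaElement{
						{AttributeName: aws.String("Author"), KeyType: types.KeyTypeHash},
					},
					Projection: &types.Projection{ProjectionType: types.ProjectionTypeKeysOnly},
					// Caps for the on-demand index; either field may be set on its own.
					OnDemandThroughput: &types.OnDemandThroughput{
						MaxReadRequestUnits:  aws.Int64(100),
						MaxWriteRequestUnits: aws.Int64(50),
					},
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```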
@@ -1009,10 +1138,12 @@ type Endpoint struct { // can be used with DeleteItem , PutItem , or UpdateItem operations; if the // comparison evaluates to true, the operation succeeds; if not, the operation // fails. You can use ExpectedAttributeValue in one of two different ways: +// // - Use AttributeValueList to specify one or more values to compare against an // attribute. Use ComparisonOperator to specify how you want to perform the // comparison. If the comparison evaluates to true, then the conditional operation // succeeds. +// // - Use Value to specify a value that DynamoDB will compare against an // attribute. If the values match, then ExpectedAttributeValue evaluates to true // and the conditional operation succeeds. Optionally, you can also set Exists to @@ -1026,132 +1157,184 @@ type Endpoint struct { type ExpectedAttributeValue struct { // One or more values to evaluate against the supplied attribute. The number of - // values in the list depends on the ComparisonOperator being used. For type - // Number, value comparisons are numeric. String value comparisons for greater - // than, equals, or less than are based on ASCII character code values. For - // example, a is greater than A , and a is greater than B . For a list of code - // values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters) - // . For Binary, DynamoDB treats each byte of the binary data as unsigned when it - // compares binary values. For information on specifying data types in JSON, see - // JSON Data Format (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataFormat.html) - // in the Amazon DynamoDB Developer Guide. + // values in the list depends on the ComparisonOperator being used. + // + // For type Number, value comparisons are numeric. + // + // String value comparisons for greater than, equals, or less than are based on + // ASCII character code values. For example, a is greater than A , and a is + // greater than B . For a list of code values, see [http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters]. + // + // For Binary, DynamoDB treats each byte of the binary data as unsigned when it + // compares binary values. + // + // For information on specifying data types in JSON, see [JSON Data Format] in the Amazon DynamoDB + // Developer Guide. + // + // [http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters]: http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters + // [JSON Data Format]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataFormat.html AttributeValueList []AttributeValue // A comparator for evaluating attributes in the AttributeValueList . For example, - // equals, greater than, less than, etc. The following comparison operators are - // available: EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | - // NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN The following are descriptions of each - // comparison operator. + // equals, greater than, less than, etc. + // + // The following comparison operators are available: + // + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | + // BEGINS_WITH | IN | BETWEEN + // + // The following are descriptions of each comparison operator. + // // - EQ : Equal. EQ is supported for all data types, including lists and maps. 
- // AttributeValueList can contain only one AttributeValue element of type String, + // + // AttributeValueList can contain only one AttributeValue element of type String, // Number, Binary, String Set, Number Set, or Binary Set. If an item contains an // AttributeValue element of a different type than the one provided in the // request, the value does not match. For example, {"S":"6"} does not equal // {"N":"6"} . Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]} . - // - NE : Not equal. NE is supported for all data types, including lists and - // maps. AttributeValueList can contain only one AttributeValue of type String, - // Number, Binary, String Set, Number Set, or Binary Set. If an item contains an + // + // - NE : Not equal. NE is supported for all data types, including lists and maps. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // Binary, String Set, Number Set, or Binary Set. If an item contains an // AttributeValue of a different type than the one provided in the request, the // value does not match. For example, {"S":"6"} does not equal {"N":"6"} . Also, // {"N":"6"} does not equal {"NS":["6", "2", "1"]} . - // - LE : Less than or equal. AttributeValueList can contain only one - // AttributeValue element of type String, Number, or Binary (not a set type). If - // an item contains an AttributeValue element of a different type than the one - // provided in the request, the value does not match. For example, {"S":"6"} does - // not equal {"N":"6"} . Also, {"N":"6"} does not compare to {"NS":["6", "2", - // "1"]} . - // - LT : Less than. AttributeValueList can contain only one AttributeValue of - // type String, Number, or Binary (not a set type). If an item contains an - // AttributeValue element of a different type than the one provided in the - // request, the value does not match. For example, {"S":"6"} does not equal - // {"N":"6"} . Also, {"N":"6"} does not compare to {"NS":["6", "2", "1"]} . - // - GE : Greater than or equal. AttributeValueList can contain only one - // AttributeValue element of type String, Number, or Binary (not a set type). If - // an item contains an AttributeValue element of a different type than the one - // provided in the request, the value does not match. For example, {"S":"6"} does - // not equal {"N":"6"} . Also, {"N":"6"} does not compare to {"NS":["6", "2", - // "1"]} . - // - GT : Greater than. AttributeValueList can contain only one AttributeValue - // element of type String, Number, or Binary (not a set type). If an item contains - // an AttributeValue element of a different type than the one provided in the - // request, the value does not match. For example, {"S":"6"} does not equal - // {"N":"6"} . Also, {"N":"6"} does not compare to {"NS":["6", "2", "1"]} . + // + // - LE : Less than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"} . Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]} . + // + // - LT : Less than. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // or Binary (not a set type). If an item contains an AttributeValue element of a + // different type than the one provided in the request, the value does not match. 
+ // For example, {"S":"6"} does not equal {"N":"6"} . Also, {"N":"6"} does not + // compare to {"NS":["6", "2", "1"]} . + // + // - GE : Greater than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"} . Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]} . + // + // - GT : Greater than. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"} . Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]} . + // // - NOT_NULL : The attribute exists. NOT_NULL is supported for all data types, - // including lists and maps. This operator tests for the existence of an attribute, - // not its data type. If the data type of attribute " a " is null, and you - // evaluate it using NOT_NULL , the result is a Boolean true . This result is - // because the attribute " a " exists; its data type is not relevant to the - // NOT_NULL comparison operator. + // including lists and maps. + // + // This operator tests for the existence of an attribute, not its data type. If + // the data type of attribute " a " is null, and you evaluate it using NOT_NULL , + // the result is a Boolean true . This result is because the attribute " a " + // exists; its data type is not relevant to the NOT_NULL comparison operator. + // // - NULL : The attribute does not exist. NULL is supported for all data types, - // including lists and maps. This operator tests for the nonexistence of an - // attribute, not its data type. If the data type of attribute " a " is null, and - // you evaluate it using NULL , the result is a Boolean false . This is because - // the attribute " a " exists; its data type is not relevant to the NULL - // comparison operator. - // - CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList - // can contain only one AttributeValue element of type String, Number, or Binary - // (not a set type). If the target attribute of the comparison is of type String, - // then the operator checks for a substring match. If the target attribute of the - // comparison is of type Binary, then the operator looks for a subsequence of the - // target that matches the input. If the target attribute of the comparison is a - // set (" SS ", " NS ", or " BS "), then the operator evaluates to true if it - // finds an exact match with any member of the set. CONTAINS is supported for - // lists: When evaluating " a CONTAINS b ", " a " can be a list; however, " b " - // cannot be a set, a map, or a list. + // including lists and maps. + // + // This operator tests for the nonexistence of an attribute, not its data type. If + // the data type of attribute " a " is null, and you evaluate it using NULL , the + // result is a Boolean false . This is because the attribute " a " exists; its + // data type is not relevant to the NULL comparison operator. + // + // - CONTAINS : Checks for a subsequence, or value in a set. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). 
If the target attribute of the comparison is + // of type String, then the operator checks for a substring match. If the target + // attribute of the comparison is of type Binary, then the operator looks for a + // subsequence of the target that matches the input. If the target attribute of the + // comparison is a set (" SS ", " NS ", or " BS "), then the operator evaluates + // to true if it finds an exact match with any member of the set. + // + // CONTAINS is supported for lists: When evaluating " a CONTAINS b ", " a " can be + // a list; however, " b " cannot be a set, a map, or a list. + // // - NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in - // a set. AttributeValueList can contain only one AttributeValue element of type - // String, Number, or Binary (not a set type). If the target attribute of the - // comparison is a String, then the operator checks for the absence of a substring - // match. If the target attribute of the comparison is Binary, then the operator - // checks for the absence of a subsequence of the target that matches the input. If - // the target attribute of the comparison is a set (" SS ", " NS ", or " BS "), - // then the operator evaluates to true if it does not find an exact match with any - // member of the set. NOT_CONTAINS is supported for lists: When evaluating " a - // NOT CONTAINS b ", " a " can be a list; however, " b " cannot be a set, a map, - // or a list. - // - BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only one - // AttributeValue of type String or Binary (not a Number or a set type). The - // target attribute of the comparison must be of type String or Binary (not a - // Number or a set type). - // - IN : Checks for matching elements in a list. AttributeValueList can contain - // one or more AttributeValue elements of type String, Number, or Binary. These - // attributes are compared against an existing attribute of an item. If any - // elements of the input are equal to the item attribute, the expression evaluates - // to true. + // a set. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If the target attribute of the comparison is + // a String, then the operator checks for the absence of a substring match. If the + // target attribute of the comparison is Binary, then the operator checks for the + // absence of a subsequence of the target that matches the input. If the target + // attribute of the comparison is a set (" SS ", " NS ", or " BS "), then the + // operator evaluates to true if it does not find an exact match with any member of + // the set. + // + // NOT_CONTAINS is supported for lists: When evaluating " a NOT CONTAINS b ", " a " + // can be a list; however, " b " cannot be a set, a map, or a list. + // + // - BEGINS_WITH : Checks for a prefix. + // + // AttributeValueList can contain only one AttributeValue of type String or Binary + // (not a Number or a set type). The target attribute of the comparison must be of + // type String or Binary (not a Number or a set type). + // + // - IN : Checks for matching elements in a list. + // + // AttributeValueList can contain one or more AttributeValue elements of type + // String, Number, or Binary. These attributes are compared against an existing + // attribute of an item. If any elements of the input are equal to the item + // attribute, the expression evaluates to true. 
+ // // - BETWEEN : Greater than or equal to the first value, and less than or equal - // to the second value. AttributeValueList must contain two AttributeValue - // elements of the same type, either String, Number, or Binary (not a set type). A - // target attribute matches if the target value is greater than, or equal to, the - // first element and less than, or equal to, the second element. If an item - // contains an AttributeValue element of a different type than the one provided - // in the request, the value does not match. For example, {"S":"6"} does not - // compare to {"N":"6"} . Also, {"N":"6"} does not compare to {"NS":["6", "2", - // "1"]} + // to the second value. + // + // AttributeValueList must contain two AttributeValue elements of the same type, + // either String, Number, or Binary (not a set type). A target attribute matches if + // the target value is greater than, or equal to, the first element and less than, + // or equal to, the second element. If an item contains an AttributeValue element + // of a different type than the one provided in the request, the value does not + // match. For example, {"S":"6"} does not compare to {"N":"6"} . Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]} ComparisonOperator ComparisonOperator - // Causes DynamoDB to evaluate the value before attempting a conditional - // operation: + // Causes DynamoDB to evaluate the value before attempting a conditional operation: + // // - If Exists is true , DynamoDB will check to see if that attribute value // already exists in the table. If it is found, then the operation succeeds. If it // is not found, the operation fails with a ConditionCheckFailedException . + // // - If Exists is false , DynamoDB assumes that the attribute value does not // exist in the table. If in fact the value does not exist, then the assumption is // valid and the operation succeeds. If the value is found, despite the assumption // that it does not exist, the operation fails with a // ConditionCheckFailedException . + // // The default setting for Exists is true . If you supply a Value all by itself, // DynamoDB assumes the attribute exists: You don't have to set Exists to true , - // because it is implied. DynamoDB returns a ValidationException if: + // because it is implied. + // + // DynamoDB returns a ValidationException if: + // // - Exists is true but there is no Value to check. (You expect a value to exist, // but don't specify what that value is.) + // // - Exists is false but you also provide a Value . (You cannot expect an // attribute to have a value, while also expecting it not to exist.) Exists *bool - // Represents the data for the expected attribute. Each attribute value is - // described as a name-value pair. The name is the data type, and the value is the - // data itself. For more information, see Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes) - // in the Amazon DynamoDB Developer Guide. + // Represents the data for the expected attribute. + // + // Each attribute value is described as a name-value pair. The name is the data + // type, and the value is the data itself. + // + // For more information, see [Data Types] in the Amazon DynamoDB Developer Guide. 
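As an illustration of the Exists semantics above, the legacy Expected parameter can express "only create this item if it does not exist yet". The table and key names are invented, and a ConditionExpression with attribute_not_exists(Id) is the modern equivalent.

```go
package main

import (
	"context"
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := dynamodb.NewFromConfig(cfg)

	_, err = client.PutItem(context.TODO(), &dynamodb.PutItemInput{
		TableName: aws.String("Threads"),
		Item: map[string]types.AttributeValue{
			"Id": &types.AttributeValueMemberS{Value: "thread-42"},
		},
		// Exists=false: the put only succeeds if no item with this key exists yet.
		Expected: map[string]types.ExpectedAttributeValue{
			"Id": {Exists: aws.Bool(false)},
		},
	})
	var ccf *types.ConditionalCheckFailedException
	if errors.As(err, &ccf) {
		log.Println("item already exists")
	} else if err != nil {
		log.Fatal(err)
	}
}
```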
+ // + // [Data Types]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes Value AttributeValue noSmithyDocumentSerde @@ -1216,7 +1399,9 @@ type ExportDescription struct { // Type of encryption used on the bucket where export data is stored. Valid values // for S3SseAlgorithm are: + // // - AES256 - server-side encryption with Amazon S3 managed keys + // // - KMS - server-side encryption with KMS managed keys S3SseAlgorithm S3SseAlgorithm @@ -1305,14 +1490,19 @@ type GlobalSecondaryIndex struct { // The complete key schema for a global secondary index, which consists of one or // more pairs of attribute names and key types: + // // - HASH - partition key + // // - RANGE - sort key + // // The partition key of an item is also known as its hash attribute. The term // "hash attribute" derives from DynamoDB's usage of an internal hash function to // evenly distribute data items across partitions, based on their partition key - // values. The sort key of an item is also known as its range attribute. The term - // "range attribute" derives from the way DynamoDB stores items with the same - // partition key physically close together, in sorted order by the sort key value. + // values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. // // This member is required. KeySchema []KeySchemaElement @@ -1324,10 +1514,18 @@ type GlobalSecondaryIndex struct { // This member is required. Projection *Projection + // The maximum number of read and write units for the specified global secondary + // index. If you use this parameter, you must specify MaxReadRequestUnits , + // MaxWriteRequestUnits , or both. + OnDemandThroughput *OnDemandThroughput + // Represents the provisioned throughput settings for the specified global - // secondary index. For current minimum and maximum provisioned throughput values, - // see Service, Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) - // in the Amazon DynamoDB Developer Guide. + // secondary index. + // + // For current minimum and maximum provisioned throughput values, see [Service, Account, and Table Quotas] in the + // Amazon DynamoDB Developer Guide. + // + // [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html ProvisionedThroughput *ProvisionedThroughput noSmithyDocumentSerde @@ -1355,12 +1553,15 @@ type GlobalSecondaryIndexDescription struct { // added to the index. (Not all items will qualify: For example, a partition key // cannot have any duplicate values.) If an item can be added to the index, // DynamoDB will do so. After all items have been processed, the backfilling - // operation is complete and Backfilling is false. You can delete an index that is - // being created during the Backfilling phase when IndexStatus is set to CREATING - // and Backfilling is true. You can't delete the index that is being created when - // IndexStatus is set to CREATING and Backfilling is false. For indexes that were - // created during a CreateTable operation, the Backfilling attribute does not - // appear in the DescribeTable output. + // operation is complete and Backfilling is false. 
+ // + // You can delete an index that is being created during the Backfilling phase when + // IndexStatus is set to CREATING and Backfilling is true. You can't delete the + // index that is being created when IndexStatus is set to CREATING and Backfilling + // is false. + // + // For indexes that were created during a CreateTable operation, the Backfilling + // attribute does not appear in the DescribeTable output. Backfilling *bool // The Amazon Resource Name (ARN) that uniquely identifies the index. @@ -1375,9 +1576,13 @@ type GlobalSecondaryIndexDescription struct { IndexSizeBytes *int64 // The current state of the global secondary index: + // // - CREATING - The index is being created. + // // - UPDATING - The index is being updated. + // // - DELETING - The index is being deleted. + // // - ACTIVE - The index is ready for use. IndexStatus IndexStatus @@ -1388,25 +1593,38 @@ type GlobalSecondaryIndexDescription struct { // The complete key schema for a global secondary index, which consists of one or // more pairs of attribute names and key types: + // // - HASH - partition key + // // - RANGE - sort key + // // The partition key of an item is also known as its hash attribute. The term // "hash attribute" derives from DynamoDB's usage of an internal hash function to // evenly distribute data items across partitions, based on their partition key - // values. The sort key of an item is also known as its range attribute. The term - // "range attribute" derives from the way DynamoDB stores items with the same - // partition key physically close together, in sorted order by the sort key value. + // values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. KeySchema []KeySchemaElement + // The maximum number of read and write units for the specified global secondary + // index. If you use this parameter, you must specify MaxReadRequestUnits , + // MaxWriteRequestUnits , or both. + OnDemandThroughput *OnDemandThroughput + // Represents attributes that are copied (projected) from the table into the // global secondary index. These are in addition to the primary key attributes and // index key attributes, which are automatically projected. Projection *Projection // Represents the provisioned throughput settings for the specified global - // secondary index. For current minimum and maximum provisioned throughput values, - // see Service, Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) - // in the Amazon DynamoDB Developer Guide. + // secondary index. + // + // For current minimum and maximum provisioned throughput values, see [Service, Account, and Table Quotas] in the + // Amazon DynamoDB Developer Guide. + // + // [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html ProvisionedThroughput *ProvisionedThroughputDescription noSmithyDocumentSerde @@ -1421,16 +1639,26 @@ type GlobalSecondaryIndexInfo struct { // The complete key schema for a global secondary index, which consists of one or // more pairs of attribute names and key types: + // // - HASH - partition key + // // - RANGE - sort key + // // The partition key of an item is also known as its hash attribute. 
The term // "hash attribute" derives from DynamoDB's usage of an internal hash function to // evenly distribute data items across partitions, based on their partition key - // values. The sort key of an item is also known as its range attribute. The term - // "range attribute" derives from the way DynamoDB stores items with the same - // partition key physically close together, in sorted order by the sort key value. + // values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. KeySchema []KeySchemaElement + // Sets the maximum number of read and write units for the specified on-demand + // table. If you use this parameter, you must specify MaxReadRequestUnits , + // MaxWriteRequestUnits , or both. + OnDemandThroughput *OnDemandThroughput + // Represents attributes that are copied (projected) from the table into the // global secondary index. These are in addition to the primary key attributes and // index key attributes, which are automatically projected. @@ -1444,18 +1672,26 @@ type GlobalSecondaryIndexInfo struct { } // Represents one of the following: +// // - A new global secondary index to be added to an existing table. +// // - New provisioned throughput parameters for an existing global secondary // index. +// // - An existing global secondary index to be removed from an existing table. type GlobalSecondaryIndexUpdate struct { // The parameters required for creating a global secondary index on an existing // table: + // // - IndexName + // // - KeySchema + // // - AttributeDefinitions + // // - Projection + // // - ProvisionedThroughput Create *CreateGlobalSecondaryIndexAction @@ -1494,9 +1730,13 @@ type GlobalTableDescription struct { GlobalTableName *string // The current state of the global table: + // // - CREATING - The global table is being created. + // // - UPDATING - The global table is being updated. + // // - DELETING - The global table is being deleted. + // // - ACTIVE - The global table is ready for use. GlobalTableStatus GlobalTableStatus @@ -1530,32 +1770,32 @@ type GlobalTableGlobalSecondaryIndexSettingsUpdate struct { // Summary information about the source file for the import. type ImportSummary struct { - // The Amazon Resource Number (ARN) of the Cloudwatch Log Group associated with + // The Amazon Resource Number (ARN) of the Cloudwatch Log Group associated with // this import task. CloudWatchLogGroupArn *string - // The time at which this import task ended. (Does this include the successful + // The time at which this import task ended. (Does this include the successful // complete creation of the table it was imported to?) EndTime *time.Time - // The Amazon Resource Number (ARN) corresponding to the import request. + // The Amazon Resource Number (ARN) corresponding to the import request. ImportArn *string - // The status of the import operation. + // The status of the import operation. ImportStatus ImportStatus - // The format of the source data. Valid values are CSV , DYNAMODB_JSON or ION . + // The format of the source data. Valid values are CSV , DYNAMODB_JSON or ION . InputFormat InputFormat - // The path and S3 bucket of the source file that is being imported. This includes - // the S3Bucket (required), S3KeyPrefix (optional) and S3BucketOwner (optional if - // the bucket is owned by the requester). 
+ // The path and S3 bucket of the source file that is being imported. This + // includes the S3Bucket (required), S3KeyPrefix (optional) and S3BucketOwner + // (optional if the bucket is owned by the requester). S3BucketSource *S3BucketSource - // The time at which this import task began. + // The time at which this import task began. StartTime *time.Time - // The Amazon Resource Number (ARN) of the table being imported into. + // The Amazon Resource Number (ARN) of the table being imported into. TableArn *string noSmithyDocumentSerde @@ -1564,70 +1804,70 @@ type ImportSummary struct { // Represents the properties of the table being imported into. type ImportTableDescription struct { - // The client token that was provided for the import task. Reusing the client + // The client token that was provided for the import task. Reusing the client // token on retry makes a call to ImportTable idempotent. ClientToken *string - // The Amazon Resource Number (ARN) of the Cloudwatch Log Group associated with + // The Amazon Resource Number (ARN) of the Cloudwatch Log Group associated with // the target table. CloudWatchLogGroupArn *string - // The time at which the creation of the table associated with this import task + // The time at which the creation of the table associated with this import task // completed. EndTime *time.Time - // The number of errors occurred on importing the source file into the target + // The number of errors occurred on importing the source file into the target // table. ErrorCount int64 - // The error code corresponding to the failure that the import job ran into during - // execution. + // The error code corresponding to the failure that the import job ran into + // during execution. FailureCode *string - // The error message corresponding to the failure that the import job ran into + // The error message corresponding to the failure that the import job ran into // during execution. FailureMessage *string - // The Amazon Resource Number (ARN) corresponding to the import request. + // The Amazon Resource Number (ARN) corresponding to the import request. ImportArn *string - // The status of the import. + // The status of the import. ImportStatus ImportStatus - // The number of items successfully imported into the new table. + // The number of items successfully imported into the new table. ImportedItemCount int64 - // The compression options for the data that has been imported into the target + // The compression options for the data that has been imported into the target // table. The values are NONE, GZIP, or ZSTD. InputCompressionType InputCompressionType - // The format of the source data going into the target table. + // The format of the source data going into the target table. InputFormat InputFormat - // The format options for the data that was imported into the target table. There + // The format options for the data that was imported into the target table. There // is one value, CsvOption. InputFormatOptions *InputFormatOptions - // The total number of items processed from the source file. + // The total number of items processed from the source file. ProcessedItemCount int64 - // The total size of data processed from the source file, in Bytes. + // The total size of data processed from the source file, in Bytes. ProcessedSizeBytes *int64 - // Values for the S3 bucket the source file is imported from. Includes bucket name - // (required), key prefix (optional) and bucket account owner ID (optional). + // Values for the S3 bucket the source file is imported from. 
Includes bucket + // name (required), key prefix (optional) and bucket account owner ID (optional). S3BucketSource *S3BucketSource - // The time when this import task started. + // The time when this import task started. StartTime *time.Time - // The Amazon Resource Number (ARN) of the table being imported into. + // The Amazon Resource Number (ARN) of the table being imported into. TableArn *string - // The parameters for the new table that is being imported into. + // The parameters for the new table that is being imported into. TableCreationParameters *TableCreationParameters - // The table id corresponding to the table created by import table process. + // The table id corresponding to the table created by import table process. TableId *string noSmithyDocumentSerde @@ -1655,11 +1895,12 @@ type IncrementalExportSpecification struct { noSmithyDocumentSerde } -// The format options for the data that was imported into the target table. There +// The format options for the data that was imported into the target table. There +// // is one value, CsvOption. type InputFormatOptions struct { - // The options for imported source files in CSV format. The values are Delimiter + // The options for imported source files in CSV format. The values are Delimiter // and HeaderList. Csv *CsvOptions @@ -1681,8 +1922,10 @@ type ItemCollectionMetrics struct { // includes the size of all the items in the table, plus the size of all attributes // projected into all of the local secondary indexes on that table. Use this // estimate to measure whether a local secondary index is approaching its size - // limit. The estimate is subject to change over time; therefore, do not rely on - // the precision or accuracy of the estimate. + // limit. + // + // The estimate is subject to change over time; therefore, do not rely on the + // precision or accuracy of the estimate. SizeEstimateRangeGB []float64 noSmithyDocumentSerde @@ -1698,10 +1941,11 @@ type ItemResponse struct { } // Represents a set of primary keys and, for each key, the attributes to retrieve -// from the table. For each primary key, you must provide all of the key -// attributes. For example, with a simple primary key, you only need to provide the -// partition key. For a composite primary key, you must provide both the partition -// key and the sort key. +// from the table. +// +// For each primary key, you must provide all of the key attributes. For example, +// with a simple primary key, you only need to provide the partition key. For a +// composite primary key, you must provide both the partition key and the sort key. type KeysAndAttributes struct { // The primary key attribute values that define the items and the attributes @@ -1711,8 +1955,9 @@ type KeysAndAttributes struct { Keys []map[string]AttributeValue // This is a legacy parameter. Use ProjectionExpression instead. For more - // information, see Legacy Conditional Parameters (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html) - // in the Amazon DynamoDB Developer Guide. + // information, see [Legacy Conditional Parameters]in the Amazon DynamoDB Developer Guide. + // + // [Legacy Conditional Parameters]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html AttributesToGet []string // The consistency of a read operation. If set to true , then a strongly consistent @@ -1721,35 +1966,52 @@ type KeysAndAttributes struct { // One or more substitution tokens for attribute names in an expression. 
The // following are some use cases for using ExpressionAttributeNames : + // // - To access an attribute whose name conflicts with a DynamoDB reserved word. + // // - To create a placeholder for repeating occurrences of an attribute name in // an expression. + // // - To prevent special characters in an attribute name from being // misinterpreted in an expression. + // // Use the # character in an expression to dereference an attribute name. For // example, consider the following attribute name: + // // - Percentile + // // The name of this attribute conflicts with a reserved word, so it cannot be used - // directly in an expression. (For the complete list of reserved words, see - // Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) - // in the Amazon DynamoDB Developer Guide). To work around this, you could specify - // the following for ExpressionAttributeNames : + // directly in an expression. (For the complete list of reserved words, see [Reserved Words]in the + // Amazon DynamoDB Developer Guide). To work around this, you could specify the + // following for ExpressionAttributeNames : + // // - {"#P":"Percentile"} + // // You could then use this substitution in an expression, as in this example: + // // - #P = :val + // // Tokens that begin with the : character are expression attribute values, which - // are placeholders for the actual value at runtime. For more information on - // expression attribute names, see Accessing Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. + // are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see [Accessing Item Attributes] in the Amazon DynamoDB + // Developer Guide. + // + // [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html + // [Accessing Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html ExpressionAttributeNames map[string]string // A string that identifies one or more attributes to retrieve from the table. // These attributes can include scalars, sets, or elements of a JSON document. The - // attributes in the ProjectionExpression must be separated by commas. If no - // attribute names are specified, then all attributes will be returned. If any of - // the requested attributes are not found, they will not appear in the result. For - // more information, see Accessing Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. + // attributes in the ProjectionExpression must be separated by commas. + // + // If no attribute names are specified, then all attributes will be returned. If + // any of the requested attributes are not found, they will not appear in the + // result. + // + // For more information, see [Accessing Item Attributes] in the Amazon DynamoDB Developer Guide. + // + // [Accessing Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html ProjectionExpression *string noSmithyDocumentSerde @@ -1757,12 +2019,15 @@ type KeysAndAttributes struct { // Represents a single element of a key schema. 
A key schema specifies the // attributes that make up the primary key of a table, or the key attributes of an -// index. A KeySchemaElement represents exactly one attribute of the primary key. -// For example, a simple primary key would be represented by one KeySchemaElement -// (for the partition key). A composite primary key would require one -// KeySchemaElement for the partition key, and another KeySchemaElement for the -// sort key. A KeySchemaElement must be a scalar, top-level attribute (not a -// nested attribute). The data type must be one of String, Number, or Binary. The +// index. +// +// A KeySchemaElement represents exactly one attribute of the primary key. For +// example, a simple primary key would be represented by one KeySchemaElement (for +// the partition key). A composite primary key would require one KeySchemaElement +// for the partition key, and another KeySchemaElement for the sort key. +// +// A KeySchemaElement must be a scalar, top-level attribute (not a nested +// attribute). The data type must be one of String, Number, or Binary. The // attribute cannot be nested within a List or a Map. type KeySchemaElement struct { @@ -1772,14 +2037,19 @@ type KeySchemaElement struct { AttributeName *string // The role that this key attribute will assume: + // // - HASH - partition key + // // - RANGE - sort key + // // The partition key of an item is also known as its hash attribute. The term // "hash attribute" derives from DynamoDB's usage of an internal hash function to // evenly distribute data items across partitions, based on their partition key - // values. The sort key of an item is also known as its range attribute. The term - // "range attribute" derives from the way DynamoDB stores items with the same - // partition key physically close together, in sorted order by the sort key value. + // values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. // // This member is required. KeyType KeyType @@ -1817,14 +2087,19 @@ type LocalSecondaryIndex struct { // The complete key schema for the local secondary index, consisting of one or // more pairs of attribute names and key types: + // // - HASH - partition key + // // - RANGE - sort key + // // The partition key of an item is also known as its hash attribute. The term // "hash attribute" derives from DynamoDB's usage of an internal hash function to // evenly distribute data items across partitions, based on their partition key - // values. The sort key of an item is also known as its range attribute. The term - // "range attribute" derives from the way DynamoDB stores items with the same - // partition key physically close together, in sorted order by the sort key value. + // values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. // // This member is required. KeySchema []KeySchemaElement @@ -1860,14 +2135,19 @@ type LocalSecondaryIndexDescription struct { // The complete key schema for the local secondary index, consisting of one or // more pairs of attribute names and key types: + // // - HASH - partition key + // // - RANGE - sort key + // // The partition key of an item is also known as its hash attribute. 
The term // "hash attribute" derives from DynamoDB's usage of an internal hash function to // evenly distribute data items across partitions, based on their partition key - // values. The sort key of an item is also known as its range attribute. The term - // "range attribute" derives from the way DynamoDB stores items with the same - // partition key physically close together, in sorted order by the sort key value. + // values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. KeySchema []KeySchemaElement // Represents attributes that are copied (projected) from the table into the @@ -1887,14 +2167,19 @@ type LocalSecondaryIndexInfo struct { // The complete key schema for a local secondary index, which consists of one or // more pairs of attribute names and key types: + // // - HASH - partition key + // // - RANGE - sort key + // // The partition key of an item is also known as its hash attribute. The term // "hash attribute" derives from DynamoDB's usage of an internal hash function to // evenly distribute data items across partitions, based on their partition key - // values. The sort key of an item is also known as its range attribute. The term - // "range attribute" derives from the way DynamoDB stores items with the same - // partition key physically close together, in sorted order by the sort key value. + // values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. KeySchema []KeySchemaElement // Represents attributes that are copied (projected) from the table into the @@ -1905,22 +2190,58 @@ type LocalSecondaryIndexInfo struct { noSmithyDocumentSerde } +// Sets the maximum number of read and write units for the specified on-demand +// table. If you use this parameter, you must specify MaxReadRequestUnits , +// MaxWriteRequestUnits , or both. +type OnDemandThroughput struct { + + // Maximum number of read request units for the specified table. + // + // To specify a maximum OnDemandThroughput on your table, set the value of + // MaxReadRequestUnits as greater than or equal to 1. To remove the maximum + // OnDemandThroughput that is currently set on your table, set the value of + // MaxReadRequestUnits to -1. + MaxReadRequestUnits *int64 + + // Maximum number of write request units for the specified table. + // + // To specify a maximum OnDemandThroughput on your table, set the value of + // MaxWriteRequestUnits as greater than or equal to 1. To remove the maximum + // OnDemandThroughput that is currently set on your table, set the value of + // MaxWriteRequestUnits to -1. + MaxWriteRequestUnits *int64 + + noSmithyDocumentSerde +} + +// Overrides the on-demand throughput settings for this replica table. If you +// don't specify a value for this parameter, it uses the source table's on-demand +// throughput settings. +type OnDemandThroughputOverride struct { + + // Maximum number of read request units for the specified replica table. + MaxReadRequestUnits *int64 + + noSmithyDocumentSerde +} + // Represents a PartiQL statement that uses parameters. type ParameterizedStatement struct { - // A PartiQL statement that uses parameters. + // A PartiQL statement that uses parameters. 
// // This member is required. Statement *string - // The parameter values. + // The parameter values. Parameters []AttributeValue // An optional parameter that returns the item attributes for a PartiQL - // ParameterizedStatement operation that failed a condition check. There is no - // additional cost associated with requesting a return value aside from the small - // network and processing overhead of receiving a larger response. No read capacity - // units are consumed. + // ParameterizedStatement operation that failed a condition check. + // + // There is no additional cost associated with requesting a return value aside + // from the small network and processing overhead of receiving a larger response. + // No read capacity units are consumed. ReturnValuesOnConditionCheckFailure ReturnValuesOnConditionCheckFailure noSmithyDocumentSerde @@ -1937,7 +2258,9 @@ type PointInTimeRecoveryDescription struct { LatestRestorableDateTime *time.Time // The current state of point in time recovery: + // // - ENABLED - Point in time recovery is enabled. + // // - DISABLED - Point in time recovery is disabled. PointInTimeRecoveryStatus PointInTimeRecoveryStatus @@ -1962,6 +2285,7 @@ type PointInTimeRecoverySpecification struct { type Projection struct { // Represents the non-key attribute names which will be projected into the index. + // // For local secondary indexes, the total count of NonKeyAttributes summed across // all of the local secondary indexes, must not exceed 100. If you project the same // attribute into two different indexes, this counts as two distinct attributes @@ -1969,10 +2293,14 @@ type Projection struct { NonKeyAttributes []string // The set of attributes that are projected into the index: + // // - KEYS_ONLY - Only the index and primary keys are projected into the index. + // // - INCLUDE - In addition to the attributes described in KEYS_ONLY , the // secondary index will include other non-key attributes that you specify. + // // - ALL - All of the table attributes are projected into the index. + // // When using the DynamoDB console, ALL is selected by default. ProjectionType ProjectionType @@ -1980,26 +2308,32 @@ type Projection struct { } // Represents the provisioned throughput settings for a specified table or index. -// The settings can be modified using the UpdateTable operation. For current -// minimum and maximum provisioned throughput values, see Service, Account, and -// Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) -// in the Amazon DynamoDB Developer Guide. +// The settings can be modified using the UpdateTable operation. +// +// For current minimum and maximum provisioned throughput values, see [Service, Account, and Table Quotas] in the +// Amazon DynamoDB Developer Guide. +// +// [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html type ProvisionedThroughput struct { // The maximum number of strongly consistent reads consumed per second before - // DynamoDB returns a ThrottlingException . For more information, see Specifying - // Read and Write Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html) - // in the Amazon DynamoDB Developer Guide. If read/write capacity mode is - // PAY_PER_REQUEST the value is set to 0. + // DynamoDB returns a ThrottlingException . For more information, see [Specifying Read and Write Requirements] in the + // Amazon DynamoDB Developer Guide. 
+ // + // If read/write capacity mode is PAY_PER_REQUEST the value is set to 0. + // + // [Specifying Read and Write Requirements]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html // // This member is required. ReadCapacityUnits *int64 // The maximum number of writes consumed per second before DynamoDB returns a - // ThrottlingException . For more information, see Specifying Read and Write - // Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html) - // in the Amazon DynamoDB Developer Guide. If read/write capacity mode is - // PAY_PER_REQUEST the value is set to 0. + // ThrottlingException . For more information, see [Specifying Read and Write Requirements] in the Amazon DynamoDB + // Developer Guide. + // + // If read/write capacity mode is PAY_PER_REQUEST the value is set to 0. + // + // [Specifying Read and Write Requirements]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html // // This member is required. WriteCapacityUnits *int64 @@ -2018,9 +2352,10 @@ type ProvisionedThroughputDescription struct { LastIncreaseDateTime *time.Time // The number of provisioned throughput decreases for this table during this UTC - // calendar day. For current maximums on provisioned throughput decreases, see - // Service, Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) - // in the Amazon DynamoDB Developer Guide. + // calendar day. For current maximums on provisioned throughput decreases, see [Service, Account, and Table Quotas]in + // the Amazon DynamoDB Developer Guide. + // + // [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html NumberOfDecreasesToday *int64 // The maximum number of strongly consistent reads consumed per second before @@ -2124,9 +2459,13 @@ type ReplicaAutoScalingDescription struct { ReplicaProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription // The current state of the replica: + // // - CREATING - The replica is being created. + // // - UPDATING - The replica is being updated. + // // - DELETING - The replica is being deleted. + // // - ACTIVE - The replica is ready for use. ReplicaStatus ReplicaStatus @@ -2161,6 +2500,10 @@ type ReplicaDescription struct { // The KMS key of the replica that will be used for KMS encryption. KMSMasterKeyId *string + // Overrides the maximum on-demand throughput settings for the specified replica + // table. + OnDemandThroughputOverride *OnDemandThroughputOverride + // Replica-specific provisioned throughput. If not described, uses the source // table's provisioned throughput settings. ProvisionedThroughputOverride *ProvisionedThroughputOverride @@ -2173,19 +2516,28 @@ type ReplicaDescription struct { ReplicaInaccessibleDateTime *time.Time // The current state of the replica: + // // - CREATING - The replica is being created. + // // - UPDATING - The replica is being updated. + // // - DELETING - The replica is being deleted. + // // - ACTIVE - The replica is ready for use. + // // - REGION_DISABLED - The replica is inaccessible because the Amazon Web - // Services Region has been disabled. If the Amazon Web Services Region remains - // inaccessible for more than 20 hours, DynamoDB will remove this replica from the - // replication group. The replica will not be deleted and replication will stop - // from and to this region. 
- // - INACCESSIBLE_ENCRYPTION_CREDENTIALS - The KMS key used to encrypt the table - // is inaccessible. If the KMS key remains inaccessible for more than 20 hours, + // Services Region has been disabled. + // + // If the Amazon Web Services Region remains inaccessible for more than 20 hours, // DynamoDB will remove this replica from the replication group. The replica will // not be deleted and replication will stop from and to this region. + // + // - INACCESSIBLE_ENCRYPTION_CREDENTIALS - The KMS key used to encrypt the table + // is inaccessible. + // + // If the KMS key remains inaccessible for more than 20 hours, DynamoDB will + // remove this replica from the replication group. The replica will not be deleted + // and replication will stop from and to this region. ReplicaStatus ReplicaStatus // Detailed information about the replica status. @@ -2209,6 +2561,10 @@ type ReplicaGlobalSecondaryIndex struct { // This member is required. IndexName *string + // Overrides the maximum on-demand throughput settings for the specified global + // secondary index in the specified replica table. + OnDemandThroughputOverride *OnDemandThroughputOverride + // Replica table GSI-specific provisioned throughput. If not specified, uses the // source table GSI's read capacity settings. ProvisionedThroughputOverride *ProvisionedThroughputOverride @@ -2223,10 +2579,14 @@ type ReplicaGlobalSecondaryIndexAutoScalingDescription struct { IndexName *string // The current state of the replica global secondary index: + // // - CREATING - The index is being created. + // // - UPDATING - The table/index configuration is being updated. The table/index // remains available for data operations when UPDATING + // // - DELETING - The index is being deleted. + // // - ACTIVE - The index is ready for use. IndexStatus IndexStatus @@ -2261,6 +2621,10 @@ type ReplicaGlobalSecondaryIndexDescription struct { // The name of the global secondary index. IndexName *string + // Overrides the maximum on-demand throughput for the specified global secondary + // index in the specified replica table. + OnDemandThroughputOverride *OnDemandThroughputOverride + // If not described, uses the source table GSI's read capacity settings. ProvisionedThroughputOverride *ProvisionedThroughputOverride @@ -2276,10 +2640,14 @@ type ReplicaGlobalSecondaryIndexSettingsDescription struct { // This member is required. IndexName *string - // The current status of the global secondary index: + // The current status of the global secondary index: + // // - CREATING - The global secondary index is being created. + // // - UPDATING - The global secondary index is being updated. + // // - DELETING - The global secondary index is being deleted. + // // - ACTIVE - The global secondary index is ready for use. IndexStatus IndexStatus @@ -2341,24 +2709,30 @@ type ReplicaSettingsDescription struct { ReplicaProvisionedReadCapacityAutoScalingSettings *AutoScalingSettingsDescription // The maximum number of strongly consistent reads consumed per second before - // DynamoDB returns a ThrottlingException . For more information, see Specifying - // Read and Write Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput) - // in the Amazon DynamoDB Developer Guide. + // DynamoDB returns a ThrottlingException . For more information, see [Specifying Read and Write Requirements] in the + // Amazon DynamoDB Developer Guide. 
+ // + // [Specifying Read and Write Requirements]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput ReplicaProvisionedReadCapacityUnits *int64 // Auto scaling settings for a global table replica's write capacity units. ReplicaProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription // The maximum number of writes consumed per second before DynamoDB returns a - // ThrottlingException . For more information, see Specifying Read and Write - // Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput) - // in the Amazon DynamoDB Developer Guide. + // ThrottlingException . For more information, see [Specifying Read and Write Requirements] in the Amazon DynamoDB + // Developer Guide. + // + // [Specifying Read and Write Requirements]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput ReplicaProvisionedWriteCapacityUnits *int64 // The current state of the Region: + // // - CREATING - The Region is being created. + // // - UPDATING - The Region is being updated. + // // - DELETING - The Region is being deleted. + // // - ACTIVE - The Region is ready for use. ReplicaStatus ReplicaStatus @@ -2384,9 +2758,10 @@ type ReplicaSettingsUpdate struct { ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate // The maximum number of strongly consistent reads consumed per second before - // DynamoDB returns a ThrottlingException . For more information, see Specifying - // Read and Write Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput) - // in the Amazon DynamoDB Developer Guide. + // DynamoDB returns a ThrottlingException . For more information, see [Specifying Read and Write Requirements] in the + // Amazon DynamoDB Developer Guide. + // + // [Specifying Read and Write Requirements]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput ReplicaProvisionedReadCapacityUnits *int64 // Replica-specific table class. If not specified, uses the source table's table @@ -2397,10 +2772,13 @@ type ReplicaSettingsUpdate struct { } // Represents one of the following: +// // - A new replica to be added to an existing regional table or global table. // This request invokes the CreateTableReplica action in the destination Region. +// // - New parameters for an existing replica. This request invokes the UpdateTable // action in the destination Region. +// // - An existing replica to be deleted. The request invokes the // DeleteTableReplica action in the destination Region, deleting the replica and // all if its items in the destination Region. @@ -2423,8 +2801,11 @@ type ReplicationGroupUpdate struct { } // Represents one of the following: +// // - A new replica to be added to an existing global table. +// // - New parameters for an existing replica. +// // - An existing replica to be removed from an existing global table. type ReplicaUpdate struct { @@ -2462,16 +2843,16 @@ type RestoreSummary struct { // The S3 bucket that is being imported from. type S3BucketSource struct { - // The S3 bucket that is being imported from. + // The S3 bucket that is being imported from. // // This member is required. S3Bucket *string - // The account number of the S3 bucket that is being imported from. 
If the bucket + // The account number of the S3 bucket that is being imported from. If the bucket // is owned by the requester this is optional. S3BucketOwner *string - // The key prefix shared by all S3 Objects that are being imported. + // The key prefix shared by all S3 Objects that are being imported. S3KeyPrefix *string noSmithyDocumentSerde @@ -2507,8 +2888,10 @@ type SourceTableDetails struct { // Controls how you are charged for read and write throughput and how you manage // capacity. This setting can be changed later. + // // - PROVISIONED - Sets the read/write capacity mode to PROVISIONED . We // recommend using PROVISIONED for predictable workloads. + // // - PAY_PER_REQUEST - Sets the read/write capacity mode to PAY_PER_REQUEST . We // recommend using PAY_PER_REQUEST for unpredictable workloads. BillingMode BillingMode @@ -2516,6 +2899,11 @@ type SourceTableDetails struct { // Number of items in the table. Note that this is an approximate value. ItemCount *int64 + // Sets the maximum number of read and write units for the specified on-demand + // table. If you use this parameter, you must specify MaxReadRequestUnits , + // MaxWriteRequestUnits , or both. + OnDemandThroughput *OnDemandThroughput + // ARN of the table for which backup was created. TableArn *string @@ -2566,13 +2954,16 @@ type SSEDescription struct { KMSMasterKeyArn *string // Server-side encryption type. The only supported value is: + // // - KMS - Server-side encryption that uses Key Management Service. The key is // stored in your account and is managed by KMS (KMS charges apply). SSEType SSEType // Represents the current state of server-side encryption. The only supported // values are: + // // - ENABLED - Server-side encryption is enabled. + // // - UPDATING - Server-side encryption is being updated. Status SSEStatus @@ -2596,6 +2987,7 @@ type SSESpecification struct { KMSMasterKeyId *string // Server-side encryption type. The only supported value is: + // // - KMS - Server-side encryption that uses Key Management Service. The key is // stored in your account and is managed by KMS (KMS charges apply). SSEType SSEType @@ -2612,15 +3004,19 @@ type StreamSpecification struct { // This member is required. StreamEnabled *bool - // When an item in the table is modified, StreamViewType determines what + // When an item in the table is modified, StreamViewType determines what // information is written to the stream for this table. Valid values for // StreamViewType are: + // // - KEYS_ONLY - Only the key attributes of the modified item are written to the // stream. + // // - NEW_IMAGE - The entire item, as it appears after it was modified, is written // to the stream. + // // - OLD_IMAGE - The entire item, as it appeared before it was modified, is // written to the stream. + // // - NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are // written to the stream. StreamViewType StreamViewType @@ -2638,9 +3034,13 @@ type TableAutoScalingDescription struct { TableName *string // The current state of the table: + // // - CREATING - The table is being created. + // // - UPDATING - The table is being updated. + // // - DELETING - The table is being deleted. + // // - ACTIVE - The table is ready for use. TableStatus TableStatus @@ -2663,35 +3063,42 @@ type TableClassSummary struct { // The parameters for the table created as part of the import operation. type TableCreationParameters struct { - // The attributes of the table created as part of the import operation. 
+ // The attributes of the table created as part of the import operation. // // This member is required. AttributeDefinitions []AttributeDefinition - // The primary key and option sort key of the table created as part of the import + // The primary key and option sort key of the table created as part of the import // operation. // // This member is required. KeySchema []KeySchemaElement - // The name of the table created as part of the import operation. + // The name of the table created as part of the import operation. // // This member is required. TableName *string - // The billing mode for provisioning the table created as part of the import + // The billing mode for provisioning the table created as part of the import // operation. BillingMode BillingMode - // The Global Secondary Indexes (GSI) of the table to be created as part of the + // The Global Secondary Indexes (GSI) of the table to be created as part of the // import operation. GlobalSecondaryIndexes []GlobalSecondaryIndex + // Sets the maximum number of read and write units for the specified on-demand + // table. If you use this parameter, you must specify MaxReadRequestUnits , + // MaxWriteRequestUnits , or both. + OnDemandThroughput *OnDemandThroughput + // Represents the provisioned throughput settings for a specified table or index. - // The settings can be modified using the UpdateTable operation. For current - // minimum and maximum provisioned throughput values, see Service, Account, and - // Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) - // in the Amazon DynamoDB Developer Guide. + // The settings can be modified using the UpdateTable operation. + // + // For current minimum and maximum provisioned throughput values, see [Service, Account, and Table Quotas] in the + // Amazon DynamoDB Developer Guide. + // + // [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html ProvisionedThroughput *ProvisionedThroughput // Represents the settings used to enable server-side encryption. @@ -2707,17 +3114,21 @@ type TableDescription struct { ArchivalSummary *ArchivalSummary // An array of AttributeDefinition objects. Each of these objects describes one - // attribute in the table and index key schema. Each AttributeDefinition object in - // this array is composed of: + // attribute in the table and index key schema. + // + // Each AttributeDefinition object in this array is composed of: + // // - AttributeName - The name of the attribute. + // // - AttributeType - The data type for the attribute. AttributeDefinitions []AttributeDefinition // Contains the details for the read/write capacity mode. BillingModeSummary *BillingModeSummary - // The date and time when the table was created, in UNIX epoch time (http://www.epochconverter.com/) - // format. + // The date and time when the table was created, in [UNIX epoch time] format. + // + // [UNIX epoch time]: http://www.epochconverter.com/ CreationDateTime *time.Time // Indicates whether deletion protection is enabled (true) or disabled (false) on @@ -2726,53 +3137,75 @@ type TableDescription struct { // The global secondary indexes, if any, on the table. Each index is scoped to a // given partition key value. Each element is composed of: + // // - Backfilling - If true, then the index is currently in the backfilling phase. // Backfilling occurs only when a new global secondary index is added to the table. 
// It is the process by which DynamoDB populates the new index with data from the // table. (This attribute does not appear for indexes that were created during a - // CreateTable operation.) You can delete an index that is being created during - // the Backfilling phase when IndexStatus is set to CREATING and Backfilling is - // true. You can't delete the index that is being created when IndexStatus is set - // to CREATING and Backfilling is false. (This attribute does not appear for - // indexes that were created during a CreateTable operation.) + // CreateTable operation.) + // + // You can delete an index that is being created during the Backfilling phase when + // IndexStatus is set to CREATING and Backfilling is true. You can't delete the + // index that is being created when IndexStatus is set to CREATING and + // Backfilling is false. (This attribute does not appear for indexes that were + // created during a CreateTable operation.) + // // - IndexName - The name of the global secondary index. + // // - IndexSizeBytes - The total size of the global secondary index, in bytes. // DynamoDB updates this value approximately every six hours. Recent changes might // not be reflected in this value. + // // - IndexStatus - The current status of the global secondary index: + // // - CREATING - The index is being created. + // // - UPDATING - The index is being updated. + // // - DELETING - The index is being deleted. + // // - ACTIVE - The index is ready for use. + // // - ItemCount - The number of items in the global secondary index. DynamoDB // updates this value approximately every six hours. Recent changes might not be // reflected in this value. + // // - KeySchema - Specifies the complete index key schema. The attribute names in // the key schema must be between 1 and 255 characters (inclusive). The key schema // must begin with the same partition key as the table. + // // - Projection - Specifies attributes that are copied (projected) from the table // into the index. These are in addition to the primary key attributes and index // key attributes, which are automatically projected. Each attribute specification // is composed of: + // // - ProjectionType - One of the following: + // // - KEYS_ONLY - Only the index and primary keys are projected into the index. + // // - INCLUDE - In addition to the attributes described in KEYS_ONLY , the // secondary index will include other non-key attributes that you specify. + // // - ALL - All of the table attributes are projected into the index. + // // - NonKeyAttributes - A list of one or more non-key attribute names that are // projected into the secondary index. The total count of attributes provided in // NonKeyAttributes , summed across all of the secondary indexes, must not exceed // 100. If you project the same attribute into two different indexes, this counts // as two distinct attributes when determining the total. + // // - ProvisionedThroughput - The provisioned throughput settings for the global // secondary index, consisting of read and write capacity units, along with data // about increases and decreases. + // // If the table is in the DELETING state, no information about indexes will be // returned. GlobalSecondaryIndexes []GlobalSecondaryIndexDescription - // Represents the version of global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html) - // in use, if the table is replicated across Amazon Web Services Regions. 
+ // Represents the version of [global tables] in use, if the table is replicated across Amazon Web + // Services Regions. + // + // [global tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html GlobalTableVersion *string // The number of items in the specified table. DynamoDB updates this value @@ -2781,30 +3214,45 @@ type TableDescription struct { ItemCount *int64 // The primary key structure for the table. Each KeySchemaElement consists of: + // // - AttributeName - The name of the attribute. + // // - KeyType - The role of the attribute: + // // - HASH - partition key - // - RANGE - sort key The partition key of an item is also known as its hash - // attribute. The term "hash attribute" derives from DynamoDB's usage of an - // internal hash function to evenly distribute data items across partitions, based - // on their partition key values. The sort key of an item is also known as its - // range attribute. The term "range attribute" derives from the way DynamoDB stores - // items with the same partition key physically close together, in sorted order by - // the sort key value. - // For more information about primary keys, see Primary Key (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelPrimaryKey) - // in the Amazon DynamoDB Developer Guide. + // + // - RANGE - sort key + // + // The partition key of an item is also known as its hash attribute. The term + // "hash attribute" derives from DynamoDB's usage of an internal hash function to + // evenly distribute data items across partitions, based on their partition key + // values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. + // + // For more information about primary keys, see [Primary Key] in the Amazon DynamoDB Developer + // Guide. + // + // [Primary Key]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelPrimaryKey KeySchema []KeySchemaElement // The Amazon Resource Name (ARN) that uniquely identifies the latest stream for // this table. LatestStreamArn *string - // A timestamp, in ISO 8601 format, for this stream. Note that LatestStreamLabel - // is not a unique identifier for the stream, because it is possible that a stream - // from another table might have the same timestamp. However, the combination of - // the following three elements is guaranteed to be unique: + // A timestamp, in ISO 8601 format, for this stream. + // + // Note that LatestStreamLabel is not a unique identifier for the stream, because + // it is possible that a stream from another table might have the same timestamp. + // However, the combination of the following three elements is guaranteed to be + // unique: + // // - Amazon Web Services customer ID + // // - Table name + // // - StreamLabel LatestStreamLabel *string @@ -2814,33 +3262,48 @@ type TableDescription struct { // within a given item collection cannot exceed 10 GB. Each element is composed of: // // - IndexName - The name of the local secondary index. + // // - KeySchema - Specifies the complete index key schema. The attribute names in // the key schema must be between 1 and 255 characters (inclusive). The key schema // must begin with the same partition key as the table. + // // - Projection - Specifies attributes that are copied (projected) from the table // into the index. 
These are in addition to the primary key attributes and index // key attributes, which are automatically projected. Each attribute specification // is composed of: + // // - ProjectionType - One of the following: + // // - KEYS_ONLY - Only the index and primary keys are projected into the index. + // // - INCLUDE - Only the specified table attributes are projected into the index. // The list of projected attributes is in NonKeyAttributes . + // // - ALL - All of the table attributes are projected into the index. + // // - NonKeyAttributes - A list of one or more non-key attribute names that are // projected into the secondary index. The total count of attributes provided in // NonKeyAttributes , summed across all of the secondary indexes, must not exceed // 100. If you project the same attribute into two different indexes, this counts // as two distinct attributes when determining the total. + // // - IndexSizeBytes - Represents the total size of the index, in bytes. DynamoDB // updates this value approximately every six hours. Recent changes might not be // reflected in this value. + // // - ItemCount - Represents the number of items in the index. DynamoDB updates // this value approximately every six hours. Recent changes might not be reflected // in this value. + // // If the table is in the DELETING state, no information about indexes will be // returned. LocalSecondaryIndexes []LocalSecondaryIndexDescription + // The maximum number of read and write units for the specified on-demand table. + // If you use this parameter, you must specify MaxReadRequestUnits , + // MaxWriteRequestUnits , or both. + OnDemandThroughput *OnDemandThroughput + // The provisioned throughput settings for the table, consisting of read and write // capacity units, along with data about increases and decreases. ProvisionedThroughput *ProvisionedThroughputDescription @@ -2875,17 +3338,24 @@ type TableDescription struct { TableSizeBytes *int64 // The current state of the table: + // // - CREATING - The table is being created. + // // - UPDATING - The table/index configuration is being updated. The table/index // remains available for data operations when UPDATING . + // // - DELETING - The table is being deleted. + // // - ACTIVE - The table is ready for use. + // // - INACCESSIBLE_ENCRYPTION_CREDENTIALS - The KMS key used to encrypt the table // in inaccessible. Table operations may fail due to failure to use the KMS key. // DynamoDB will initiate the table archival process when a table's KMS key remains // inaccessible for more than seven days. + // // - ARCHIVING - The table is being archived. Operations are not allowed until // archival is complete. + // // - ARCHIVED - The table has been archived. See the ArchivalReason for more // information. TableStatus TableStatus @@ -2894,13 +3364,18 @@ type TableDescription struct { } // Describes a tag. A tag is a key-value pair. You can add up to 50 tags to a -// single DynamoDB table. Amazon Web Services-assigned tag names and values are -// automatically assigned the aws: prefix, which the user cannot assign. Amazon -// Web Services-assigned tag names do not count towards the tag limit of 50. -// User-assigned tag names have the prefix user: in the Cost Allocation Report. -// You cannot backdate the application of a tag. For an overview on tagging -// DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html) -// in the Amazon DynamoDB Developer Guide. +// single DynamoDB table. 
+// +// Amazon Web Services-assigned tag names and values are automatically assigned +// the aws: prefix, which the user cannot assign. Amazon Web Services-assigned tag +// names do not count towards the tag limit of 50. User-assigned tag names have the +// prefix user: in the Cost Allocation Report. You cannot backdate the application +// of a tag. +// +// For an overview on tagging DynamoDB resources, see [Tagging for DynamoDB] in the Amazon DynamoDB +// Developer Guide. +// +// [Tagging for DynamoDB]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html type Tag struct { // The key of the tag. Tag keys are case sensitive. Each DynamoDB table can only @@ -2921,10 +3396,10 @@ type Tag struct { // The description of the Time to Live (TTL) status on the specified table. type TimeToLiveDescription struct { - // The name of the TTL attribute for items in the table. + // The name of the TTL attribute for items in the table. AttributeName *string - // The TTL status for the table. + // The TTL status for the table. TimeToLiveStatus TimeToLiveStatus noSmithyDocumentSerde @@ -3027,12 +3502,18 @@ type UpdateGlobalSecondaryIndexAction struct { // This member is required. IndexName *string + // Updates the maximum number of read and write units for the specified global + // secondary index. If you use this parameter, you must specify MaxReadRequestUnits + // , MaxWriteRequestUnits , or both. + OnDemandThroughput *OnDemandThroughput + // Represents the provisioned throughput settings for the specified global - // secondary index. For current minimum and maximum provisioned throughput values, - // see Service, Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) - // in the Amazon DynamoDB Developer Guide. + // secondary index. // - // This member is required. + // For current minimum and maximum provisioned throughput values, see [Service, Account, and Table Quotas] in the + // Amazon DynamoDB Developer Guide. + // + // [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html ProvisionedThroughput *ProvisionedThroughput noSmithyDocumentSerde @@ -3064,6 +3545,9 @@ type UpdateReplicationGroupMemberAction struct { // default DynamoDB KMS key alias/aws/dynamodb . KMSMasterKeyId *string + // Overrides the maximum on-demand throughput for the replica table. + OnDemandThroughputOverride *OnDemandThroughputOverride + // Replica-specific provisioned throughput. If not specified, uses the source // table's provisioned throughput settings. 
ProvisionedThroughputOverride *ProvisionedThroughputOverride diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/validators.go index d1925ed8b4b..3d7f72cbefb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/validators.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/validators.go @@ -2430,9 +2430,7 @@ func validateUpdateGlobalSecondaryIndexAction(v *types.UpdateGlobalSecondaryInde if v.IndexName == nil { invalidParams.Add(smithy.NewErrParamRequired("IndexName")) } - if v.ProvisionedThroughput == nil { - invalidParams.Add(smithy.NewErrParamRequired("ProvisionedThroughput")) - } else if v.ProvisionedThroughput != nil { + if v.ProvisionedThroughput != nil { if err := validateProvisionedThroughput(v.ProvisionedThroughput); err != nil { invalidParams.AddNested("ProvisionedThroughput", err.(smithy.InvalidParamsError)) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/CHANGELOG.md index 3482794f9aa..c1933104439 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/CHANGELOG.md @@ -1,3 +1,57 @@ +# v1.22.3 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.2 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.1 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.0 (2024-06-26) + +* **Feature**: Support list-of-string endpoint parameter. + +# v1.21.1 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.21.0 (2024-06-18) + +* **Feature**: Track usage of various AWS SDK features in user-agent string. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.11 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.10 (2024-06-07) + +* **Bug Fix**: Add clock skew correction on all service clients +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.9 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.8 (2024-05-23) + +* No change notes available for this release. 
+ +# v1.20.7 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.6 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.5 (2024-05-08) + +* **Bug Fix**: GoDoc improvement + # v1.20.4 (2024-03-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/api_client.go index 360fcae966e..eeffe7c1764 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/api_client.go @@ -14,13 +14,16 @@ import ( internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" smithydocument "github.com/aws/smithy-go/document" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" "net" "net/http" + "sync/atomic" "time" ) @@ -31,6 +34,9 @@ const ServiceAPIVersion = "2012-08-10" // Streams. type Client struct { options Options + + // Difference between the time reported by the server and the client + timeOffset *atomic.Int64 } // New returns an initialized Client based on the functional options. Provide @@ -69,6 +75,8 @@ func New(options Options, optFns ...func(*Options)) *Client { options: options, } + initializeTimeOffsetResolver(client) + return client } @@ -230,15 +238,16 @@ func setResolvedDefaultsMode(o *Options) { // NewFromConfig returns a new client from the provided config. 
func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + AccountIDEndpointMode: cfg.AccountIDEndpointMode, } resolveAWSRetryerProvider(cfg, &opts) resolveAWSRetryMaxAttempts(cfg, &opts) @@ -442,6 +451,30 @@ func addContentSHA256Header(stack *middleware.Stack) error { return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) } +func addIsWaiterUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) + return nil + }) +} + +func addIsPaginatorUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) + return nil + }) +} + func addRetry(stack *middleware.Stack, o Options) error { attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { m.LogAttempts = o.ClientLogMode.IsRetries() @@ -485,6 +518,63 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { return nil } +func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { + if mode == aws.AccountIDEndpointModeDisabled { + return nil + } + + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { + return aws.String(ca.Credentials.AccountID) + } + + return nil +} + +func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { + mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} + if err := stack.Build.Add(&mw, middleware.After); err != nil { + return err + } + return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) +} +func initializeTimeOffsetResolver(c *Client) { + c.timeOffset = new(atomic.Int64) +} + +func checkAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) error { + switch mode { + case aws.AccountIDEndpointModeUnset: + case aws.AccountIDEndpointModePreferred: + case aws.AccountIDEndpointModeDisabled: + case aws.AccountIDEndpointModeRequired: + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); !ok { + return fmt.Errorf("accountID is required but not set") + } else if ca.Credentials.AccountID == "" { + return fmt.Errorf("accountID is required but not set") + } + // default check in case invalid mode is configured through request config + default: + return fmt.Errorf("invalid accountID endpoint mode %s, must be preferred/required/disabled", mode) + } + + return nil +} + +func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + switch options.Retryer.(type) { + case *retry.Standard: + 
ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) + case *retry.AdaptiveMode: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) + } + return nil +} + func addRecursionDetection(stack *middleware.Stack) error { return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/api_op_DescribeStream.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/api_op_DescribeStream.go index db07d2412e3..401a644d6ca 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/api_op_DescribeStream.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/api_op_DescribeStream.go @@ -13,12 +13,15 @@ import ( // Returns information about a stream, including the current status of the stream, // its Amazon Resource Name (ARN), the composition of its shards, and its -// corresponding DynamoDB table. You can call DescribeStream at a maximum rate of -// 10 times per second. Each shard in the stream has a SequenceNumberRange -// associated with it. If the SequenceNumberRange has a StartingSequenceNumber but -// no EndingSequenceNumber , then the shard is still open (able to receive more -// stream records). If both StartingSequenceNumber and EndingSequenceNumber are -// present, then that shard is closed and can no longer receive more data. +// corresponding DynamoDB table. +// +// You can call DescribeStream at a maximum rate of 10 times per second. +// +// Each shard in the stream has a SequenceNumberRange associated with it. If the +// SequenceNumberRange has a StartingSequenceNumber but no EndingSequenceNumber , +// then the shard is still open (able to receive more stream records). If both +// StartingSequenceNumber and EndingSequenceNumber are present, then that shard is +// closed and can no longer receive more data. func (c *Client) DescribeStream(ctx context.Context, params *DescribeStreamInput, optFns ...func(*Options)) (*DescribeStreamOutput, error) { if params == nil { params = &DescribeStreamInput{} @@ -121,6 +124,12 @@ func (c *Client) addOperationDescribeStreamMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDescribeStreamValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/api_op_GetRecords.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/api_op_GetRecords.go index 4e5a11103f2..028eed5c36a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/api_op_GetRecords.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/api_op_GetRecords.go @@ -11,14 +11,17 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Retrieves the stream records from a given shard. Specify a shard iterator using -// the ShardIterator parameter. The shard iterator specifies the position in the -// shard from which you want to start reading stream records sequentially. If there -// are no stream records available in the portion of the shard that the iterator -// points to, GetRecords returns an empty list. Note that it might take multiple -// calls to get to a portion of the shard that contains stream records. 
GetRecords -// can retrieve a maximum of 1 MB of data or 1000 stream records, whichever comes -// first. +// Retrieves the stream records from a given shard. +// +// Specify a shard iterator using the ShardIterator parameter. The shard iterator +// specifies the position in the shard from which you want to start reading stream +// records sequentially. If there are no stream records available in the portion of +// the shard that the iterator points to, GetRecords returns an empty list. Note +// that it might take multiple calls to get to a portion of the shard that contains +// stream records. +// +// GetRecords can retrieve a maximum of 1 MB of data or 1000 stream records, +// whichever comes first. func (c *Client) GetRecords(ctx context.Context, params *GetRecordsInput, optFns ...func(*Options)) (*GetRecordsOutput, error) { if params == nil { params = &GetRecordsInput{} @@ -122,6 +125,12 @@ func (c *Client) addOperationGetRecordsMiddlewares(stack *middleware.Stack, opti if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpGetRecordsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/api_op_GetShardIterator.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/api_op_GetShardIterator.go index f39d19bc43c..7fd034a0978 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/api_op_GetShardIterator.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/api_op_GetShardIterator.go @@ -13,8 +13,9 @@ import ( // Returns a shard iterator. A shard iterator provides information about how to // retrieve the stream records from within a shard. Use the shard iterator in a -// subsequent GetRecords request to read the stream records from the shard. A -// shard iterator expires 15 minutes after it is returned to the requester. +// subsequent GetRecords request to read the stream records from the shard. +// +// A shard iterator expires 15 minutes after it is returned to the requester. func (c *Client) GetShardIterator(ctx context.Context, params *GetShardIteratorInput, optFns ...func(*Options)) (*GetShardIteratorOutput, error) { if params == nil { params = &GetShardIteratorInput{} @@ -40,14 +41,18 @@ type GetShardIteratorInput struct { // Determines how the shard iterator is used to start reading stream records from // the shard: + // // - AT_SEQUENCE_NUMBER - Start reading exactly from the position denoted by a // specific sequence number. + // // - AFTER_SEQUENCE_NUMBER - Start reading right after the position denoted by a // specific sequence number. + // // - TRIM_HORIZON - Start reading at the last (untrimmed) stream record, which is // the oldest record in the shard. In DynamoDB Streams, there is a 24 hour limit on // data retention. Stream records whose age exceeds this limit are subject to // removal (trimming) from the stream. + // // - LATEST - Start reading just after the most recent stream record in the // shard, so that you always read the most recent data in the shard. 
// @@ -134,6 +139,12 @@ func (c *Client) addOperationGetShardIteratorMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpGetShardIteratorValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/api_op_ListStreams.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/api_op_ListStreams.go index 817d777413d..aebf7da44b6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/api_op_ListStreams.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/api_op_ListStreams.go @@ -13,8 +13,9 @@ import ( // Returns an array of stream ARNs associated with the current account and // endpoint. If the TableName parameter is present, then ListStreams will return -// only the streams ARNs for that table. You can call ListStreams at a maximum -// rate of 5 times per second. +// only the streams ARNs for that table. +// +// You can call ListStreams at a maximum rate of 5 times per second. func (c *Client) ListStreams(ctx context.Context, params *ListStreamsInput, optFns ...func(*Options)) (*ListStreamsOutput, error) { if params == nil { params = &ListStreamsInput{} @@ -53,8 +54,11 @@ type ListStreamsOutput struct { // The stream ARN of the item where the operation stopped, inclusive of the // previous result set. Use this value to start a new operation, excluding this - // value in the new request. If LastEvaluatedStreamArn is empty, then the "last - // page" of results has been processed and there is no more data to be retrieved. + // value in the new request. + // + // If LastEvaluatedStreamArn is empty, then the "last page" of results has been + // processed and there is no more data to be retrieved. + // // If LastEvaluatedStreamArn is not empty, it does not necessarily mean that there // is more data in the result set. The only way to know when you have reached the // end of the result set is when LastEvaluatedStreamArn is empty. 
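The reflowed ListStreams documentation above spells out the LastEvaluatedStreamArn paging contract: keep calling until the value comes back empty. A minimal caller-side sketch of that loop follows; it is not part of this patch, and the table name and default-config setup are illustrative assumptions.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodbstreams"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := dynamodbstreams.NewFromConfig(cfg)

	var exclusiveStart *string
	for {
		out, err := client.ListStreams(context.TODO(), &dynamodbstreams.ListStreamsInput{
			TableName:               aws.String("my-table"), // illustrative table name
			ExclusiveStartStreamArn: exclusiveStart,
		})
		if err != nil {
			log.Fatal(err)
		}
		for _, s := range out.Streams {
			log.Printf("stream: %s", aws.ToString(s.StreamArn))
		}
		// An empty LastEvaluatedStreamArn means the "last page" has been processed;
		// a non-empty value only means another request is needed to find out.
		if out.LastEvaluatedStreamArn == nil {
			break
		}
		exclusiveStart = out.LastEvaluatedStreamArn
	}
}
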
@@ -124,6 +128,12 @@ func (c *Client) addOperationListStreamsMiddlewares(stack *middleware.Stack, opt if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListStreams(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/auth.go index 25738f652c9..f1b09d32559 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/auth.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/auth.go @@ -12,7 +12,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -func bindAuthParamsRegion(params *AuthResolverParameters, _ interface{}, options Options) { +func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { params.Region = options.Region } @@ -90,12 +90,12 @@ type AuthResolverParameters struct { Region string } -func bindAuthResolverParams(operation string, input interface{}, options Options) *AuthResolverParameters { +func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { params := &AuthResolverParameters{ Operation: operation, } - bindAuthParamsRegion(params, input, options) + bindAuthParamsRegion(ctx, params, input, options) return params } @@ -145,7 +145,7 @@ func (*resolveAuthSchemeMiddleware) ID() string { func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - params := bindAuthResolverParams(m.operation, getOperationInput(ctx), m.options) + params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) if err != nil { return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/deserializers.go index dc2231bf986..556b624f503 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/deserializers.go @@ -18,8 +18,17 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" "io" "strings" + "time" ) +func deserializeS3Expires(v string) (*time.Time, error) { + t, err := smithytime.ParseHTTPDate(v) + if err != nil { + return nil, nil + } + return &t, nil +} + type awsAwsjson10_deserializeOpDescribeStream struct { } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/doc.go index 6c1902cb960..1e9d09b4c00 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/doc.go @@ -3,8 +3,11 @@ // Package dynamodbstreams provides the API client, operations, and parameter // types for Amazon DynamoDB Streams. // -// Amazon DynamoDB Amazon DynamoDB Streams provides API actions for accessing -// streams and processing stream records. 
To learn more about application -// development with Streams, see Capturing Table Activity with DynamoDB Streams (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Streams.html) -// in the Amazon DynamoDB Developer Guide. +// # Amazon DynamoDB +// +// Amazon DynamoDB Streams provides API actions for accessing streams and +// processing stream records. To learn more about application development with +// Streams, see [Capturing Table Activity with DynamoDB Streams]in the Amazon DynamoDB Developer Guide. +// +// [Capturing Table Activity with DynamoDB Streams]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Streams.html package dynamodbstreams diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/endpoints.go index 26aefb7e225..abcfa771087 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/endpoints.go @@ -289,6 +289,17 @@ func (p EndpointParameters) WithDefaults() EndpointParameters { return p } +type stringSlice []string + +func (s stringSlice) Get(i int) *string { + if i < 0 || i >= len(s) { + return nil + } + + v := s[i] + return &v +} + // EndpointResolverV2 provides the interface for resolving service endpoints. type EndpointResolverV2 interface { // ResolveEndpoint attempts to resolve the endpoint with the provided options, @@ -591,7 +602,7 @@ type endpointParamsBinder interface { bindEndpointParams(*EndpointParameters) } -func bindEndpointParams(input interface{}, options Options) *EndpointParameters { +func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { params := &EndpointParameters{} params.Region = bindRegion(options.Region) @@ -621,6 +632,10 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return next.HandleFinalize(ctx, in) } + if err := checkAccountID(getIdentity(ctx), m.options.AccountIDEndpointMode); err != nil { + return out, metadata, fmt.Errorf("invalid accountID set: %w", err) + } + req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) @@ -630,7 +645,7 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") } - params := bindEndpointParams(getOperationInput(ctx), m.options) + params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/go_module_metadata.go index b1f73a217d2..c9b59ccd592 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/go_module_metadata.go @@ -3,4 +3,4 @@ package dynamodbstreams // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.20.4" +const goModuleVersion = "1.22.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/options.go index 6304b262f06..a3ac2cf889e 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/options.go @@ -24,6 +24,9 @@ type Options struct { // modify this list for per operation behavior. APIOptions []func(*middleware.Stack) error + // Indicates how aws account ID is applied in endpoint2.0 routing + AccountIDEndpointMode aws.AccountIDEndpointMode + // The optional application specific identifier appended to the User-Agent header. AppID string @@ -50,8 +53,10 @@ type Options struct { // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a // value for this field will likely prevent you from using any endpoint-related // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom - // endpoint, set the client option BaseEndpoint instead. + // BaseEndpoint. + // + // To migrate an EndpointResolver implementation that uses a custom endpoint, set + // the client option BaseEndpoint instead. EndpointResolver EndpointResolver // Resolves the endpoint used for a particular service operation. This should be @@ -70,17 +75,20 @@ type Options struct { // RetryMaxAttempts specifies the maximum number attempts an API client will call // an operation that fails with a retryable error. A value of 0 is ignored, and // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. If specified in an operation call's - // functional options with a value that is different than the constructed client's - // Options, the Client's Retryer will be wrapped to use the operation's specific - // RetryMaxAttempts value. + // per operation call's retry max attempts. + // + // If specified in an operation call's functional options with a value that is + // different than the constructed client's Options, the Client's Retryer will be + // wrapped to use the operation's specific RetryMaxAttempts value. RetryMaxAttempts int // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. Currently does not support per operation call - // overrides, may in the future. + // Retryer option is not also specified. + // + // When creating a new API Clients this member will only be used if the Retryer + // Options member is nil. This value will be ignored if Retryer is not nil. + // + // Currently does not support per operation call overrides, may in the future. RetryMode aws.RetryMode // Retryer guides how HTTP requests should be retried in case of recoverable @@ -97,8 +105,9 @@ type Options struct { // The initial DefaultsMode used when the client options were constructed. If the // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. Currently does not support per operation call - // overrides, may in the future. + // value was at that point in time. + // + // Currently does not support per operation call overrides, may in the future. resolvedDefaultsMode aws.DefaultsMode // The HTTP client to invoke API calls with. Defaults to client's default HTTP @@ -143,6 +152,7 @@ func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { // Deprecated: EndpointResolver and WithEndpointResolver. 
Providing a value for // this field will likely prevent you from using any endpoint-related service // features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// // To migrate an EndpointResolver implementation that uses a custom endpoint, set // the client option BaseEndpoint instead. func WithEndpointResolver(v EndpointResolver) func(*Options) { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/types/enums.go index 9bf4e095381..4564c0fd2a5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/types/enums.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/types/enums.go @@ -11,8 +11,9 @@ const ( ) // Values returns all known values for KeyType. Note that this can be expanded in -// the future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. +// the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (KeyType) Values() []KeyType { return []KeyType{ "HASH", @@ -30,8 +31,9 @@ const ( ) // Values returns all known values for OperationType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (OperationType) Values() []OperationType { return []OperationType{ "INSERT", @@ -51,8 +53,9 @@ const ( ) // Values returns all known values for ShardIteratorType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ShardIteratorType) Values() []ShardIteratorType { return []ShardIteratorType{ "TRIM_HORIZON", @@ -73,8 +76,9 @@ const ( ) // Values returns all known values for StreamStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (StreamStatus) Values() []StreamStatus { return []StreamStatus{ "ENABLING", @@ -95,8 +99,9 @@ const ( ) // Values returns all known values for StreamViewType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
func (StreamViewType) Values() []StreamViewType { return []StreamViewType{ "NEW_IMAGE", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/types/errors.go index 09329a9c57a..b1acc7d516d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/types/errors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/types/errors.go @@ -62,17 +62,26 @@ func (e *InternalServerError) ErrorCode() string { func (e *InternalServerError) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } // There is no limit to the number of daily on-demand backups that can be taken. +// // For most purposes, up to 500 simultaneous table operations are allowed per // account. These operations include CreateTable , UpdateTable , DeleteTable , -// UpdateTimeToLive , RestoreTableFromBackup , and RestoreTableToPointInTime . When -// you are creating a table with one or more secondary indexes, you can have up to -// 250 such requests running at a time. However, if the table or index +// UpdateTimeToLive , RestoreTableFromBackup , and RestoreTableToPointInTime . +// +// When you are creating a table with one or more secondary indexes, you can have +// up to 250 such requests running at a time. However, if the table or index // specifications are complex, then DynamoDB might temporarily reduce the number of -// concurrent operations. When importing into DynamoDB, up to 50 simultaneous -// import table operations are allowed per account. There is a soft account quota -// of 2,500 tables. GetRecords was called with a value of more than 1000 for the -// limit request parameter. More than 2 processes are reading from the same streams -// shard at the same time. Exceeding this limit may result in request throttling. +// concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations are +// allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// GetRecords was called with a value of more than 1000 for the limit request +// parameter. +// +// More than 2 processes are reading from the same streams shard at the same time. +// Exceeding this limit may result in request throttling. type LimitExceededException struct { Message *string @@ -125,12 +134,15 @@ func (e *ResourceNotFoundException) ErrorCode() string { } func (e *ResourceNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// The operation attempted to read past the oldest stream record in a shard. In -// DynamoDB Streams, there is a 24 hour limit on data retention. Stream records +// The operation attempted to read past the oldest stream record in a shard. +// +// In DynamoDB Streams, there is a 24 hour limit on data retention. Stream records // whose age exceeds this limit are subject to removal (trimming) from the stream. // You might receive a TrimmedDataAccessException if: +// // - You request a shard iterator with a sequence number older than the trim // point (24 hours). +// // - You obtain a shard iterator, but before you use the iterator in a GetRecords // request, a stream record in the shard exceeds the 24 hour period and is trimmed. // This causes the iterator to access a record that no longer exists. 
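The LimitExceededException and TrimmedDataAccessException descriptions above imply specific consumer-side reactions when reading a shard. A minimal sketch of that handling follows; it is not part of this patch, and the placeholder shard iterator and default client configuration are assumptions.

package main

import (
	"context"
	"errors"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodbstreams"
	"github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := dynamodbstreams.NewFromConfig(cfg)

	// Placeholder value; a real iterator comes from GetShardIterator.
	iterator := aws.String("EXAMPLE-SHARD-ITERATOR")

	out, err := client.GetRecords(context.TODO(), &dynamodbstreams.GetRecordsInput{
		ShardIterator: iterator,
	})
	if err != nil {
		var trimmed *types.TrimmedDataAccessException
		var limit *types.LimitExceededException
		switch {
		case errors.As(err, &trimmed):
			// The iterator points past the 24-hour trim horizon; request a fresh
			// shard iterator (for example TRIM_HORIZON) and resume from there.
			log.Println("shard data trimmed, re-seed with GetShardIterator")
		case errors.As(err, &limit):
			// Too many concurrent readers or a Limit above 1000; back off and retry.
			time.Sleep(time.Second)
		default:
			log.Fatal(err)
		}
		return
	}
	log.Printf("fetched %d stream records", len(out.Records))
}
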
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/types/types.go index 0e2d3522338..bfd4ad48d0f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/types/types.go @@ -7,10 +7,12 @@ import ( "time" ) -// Represents the data for an attribute. Each attribute value is described as a -// name-value pair. The name is the data type, and the value is the data itself. -// For more information, see Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes) -// in the Amazon DynamoDB Developer Guide. +// Represents the data for an attribute. +// +// Each attribute value is described as a name-value pair. The name is the data +// type, and the value is the data itself. +// +// For more information, see [Data Types] in the Amazon DynamoDB Developer Guide. // // The following types satisfy this interface: // @@ -24,12 +26,15 @@ import ( // AttributeValueMemberNULL // AttributeValueMemberS // AttributeValueMemberSS +// +// [Data Types]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes type AttributeValue interface { isAttributeValue() } -// An attribute of type Binary. For example: "B": -// "dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk" +// An attribute of type Binary. For example: +// +// "B": "dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk" type AttributeValueMemberB struct { Value []byte @@ -38,7 +43,9 @@ type AttributeValueMemberB struct { func (*AttributeValueMemberB) isAttributeValue() {} -// An attribute of type Boolean. For example: "BOOL": true +// An attribute of type Boolean. For example: +// +// "BOOL": true type AttributeValueMemberBOOL struct { Value bool @@ -47,8 +54,9 @@ type AttributeValueMemberBOOL struct { func (*AttributeValueMemberBOOL) isAttributeValue() {} -// An attribute of type Binary Set. For example: "BS": ["U3Vubnk=", "UmFpbnk=", -// "U25vd3k="] +// An attribute of type Binary Set. For example: +// +// "BS": ["U3Vubnk=", "UmFpbnk=", "U25vd3k="] type AttributeValueMemberBS struct { Value [][]byte @@ -57,8 +65,9 @@ type AttributeValueMemberBS struct { func (*AttributeValueMemberBS) isAttributeValue() {} -// An attribute of type List. For example: "L": [ {"S": "Cookies"} , {"S": -// "Coffee"}, {"N": "3.14159"}] +// An attribute of type List. For example: +// +// "L": [ {"S": "Cookies"} , {"S": "Coffee"}, {"N": "3.14159"}] type AttributeValueMemberL struct { Value []AttributeValue @@ -67,8 +76,9 @@ type AttributeValueMemberL struct { func (*AttributeValueMemberL) isAttributeValue() {} -// An attribute of type Map. For example: "M": {"Name": {"S": "Joe"}, "Age": {"N": -// "35"}} +// An attribute of type Map. For example: +// +// "M": {"Name": {"S": "Joe"}, "Age": {"N": "35"}} type AttributeValueMemberM struct { Value map[string]AttributeValue @@ -77,10 +87,13 @@ type AttributeValueMemberM struct { func (*AttributeValueMemberM) isAttributeValue() {} -// An attribute of type Number. For example: "N": "123.45" Numbers are sent across -// the network to DynamoDB as strings, to maximize compatibility across languages -// and libraries. However, DynamoDB treats them as number type attributes for -// mathematical operations. +// An attribute of type Number. 
For example: +// +// "N": "123.45" +// +// Numbers are sent across the network to DynamoDB as strings, to maximize +// compatibility across languages and libraries. However, DynamoDB treats them as +// number type attributes for mathematical operations. type AttributeValueMemberN struct { Value string @@ -89,8 +102,11 @@ type AttributeValueMemberN struct { func (*AttributeValueMemberN) isAttributeValue() {} -// An attribute of type Number Set. For example: "NS": ["42.2", "-19", "7.5", -// "3.14"] Numbers are sent across the network to DynamoDB as strings, to maximize +// An attribute of type Number Set. For example: +// +// "NS": ["42.2", "-19", "7.5", "3.14"] +// +// Numbers are sent across the network to DynamoDB as strings, to maximize // compatibility across languages and libraries. However, DynamoDB treats them as // number type attributes for mathematical operations. type AttributeValueMemberNS struct { @@ -101,7 +117,9 @@ type AttributeValueMemberNS struct { func (*AttributeValueMemberNS) isAttributeValue() {} -// An attribute of type Null. For example: "NULL": true +// An attribute of type Null. For example: +// +// "NULL": true type AttributeValueMemberNULL struct { Value bool @@ -110,7 +128,9 @@ type AttributeValueMemberNULL struct { func (*AttributeValueMemberNULL) isAttributeValue() {} -// An attribute of type String. For example: "S": "Hello" +// An attribute of type String. For example: +// +// "S": "Hello" type AttributeValueMemberS struct { Value string @@ -119,8 +139,9 @@ type AttributeValueMemberS struct { func (*AttributeValueMemberS) isAttributeValue() {} -// An attribute of type String Set. For example: "SS": ["Giraffe", "Hippo" -// ,"Zebra"] +// An attribute of type String Set. For example: +// +// "SS": ["Giraffe", "Hippo" ,"Zebra"] type AttributeValueMemberSS struct { Value []string @@ -144,12 +165,15 @@ type Identity struct { // Represents a single element of a key schema. A key schema specifies the // attributes that make up the primary key of a table, or the key attributes of an -// index. A KeySchemaElement represents exactly one attribute of the primary key. -// For example, a simple primary key would be represented by one KeySchemaElement -// (for the partition key). A composite primary key would require one -// KeySchemaElement for the partition key, and another KeySchemaElement for the -// sort key. A KeySchemaElement must be a scalar, top-level attribute (not a -// nested attribute). The data type must be one of String, Number, or Binary. The +// index. +// +// A KeySchemaElement represents exactly one attribute of the primary key. For +// example, a simple primary key would be represented by one KeySchemaElement (for +// the partition key). A composite primary key would require one KeySchemaElement +// for the partition key, and another KeySchemaElement for the sort key. +// +// A KeySchemaElement must be a scalar, top-level attribute (not a nested +// attribute). The data type must be one of String, Number, or Binary. The // attribute cannot be nested within a List or a Map. type KeySchemaElement struct { @@ -159,14 +183,19 @@ type KeySchemaElement struct { AttributeName *string // The role that this key attribute will assume: + // // - HASH - partition key + // // - RANGE - sort key + // // The partition key of an item is also known as its hash attribute. The term // "hash attribute" derives from DynamoDB's usage of an internal hash function to // evenly distribute data items across partitions, based on their partition key - // values. 
The sort key of an item is also known as its range attribute. The term - // "range attribute" derives from the way DynamoDB stores items with the same - // partition key physically close together, in sorted order by the sort key value. + // values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. // // This member is required. KeyType KeyType @@ -189,8 +218,11 @@ type Record struct { EventID *string // The type of data modification that was performed on the DynamoDB table: + // // - INSERT - a new item was added to the table. + // // - MODIFY - one or more of an existing item's attributes were modified. + // // - REMOVE - the item was deleted from the table EventName OperationType @@ -199,16 +231,23 @@ type Record struct { EventSource *string // The version number of the stream record format. This number is updated whenever - // the structure of Record is modified. Client applications must not assume that - // eventVersion will remain at a particular value, as this number is subject to - // change at any time. In general, eventVersion will only increase as the - // low-level DynamoDB Streams API evolves. + // the structure of Record is modified. + // + // Client applications must not assume that eventVersion will remain at a + // particular value, as this number is subject to change at any time. In general, + // eventVersion will only increase as the low-level DynamoDB Streams API evolves. EventVersion *string // Items that are deleted by the Time to Live process after expiration have the // following fields: - // - Records[].userIdentity.type "Service" - // - Records[].userIdentity.principalId "dynamodb.amazonaws.com" + // + // - Records[].userIdentity.type + // + // "Service" + // + // - Records[].userIdentity.principalId + // + // "dynamodb.amazonaws.com" UserIdentity *Identity noSmithyDocumentSerde @@ -250,12 +289,17 @@ type Stream struct { // The Amazon Resource Name (ARN) for the stream. StreamArn *string - // A timestamp, in ISO 8601 format, for this stream. Note that LatestStreamLabel - // is not a unique identifier for the stream, because it is possible that a stream - // from another table might have the same timestamp. However, the combination of - // the following three elements is guaranteed to be unique: + // A timestamp, in ISO 8601 format, for this stream. + // + // Note that LatestStreamLabel is not a unique identifier for the stream, because + // it is possible that a stream from another table might have the same timestamp. + // However, the combination of the following three elements is guaranteed to be + // unique: + // // - the Amazon Web Services customer ID. + // // - the table name + // // - the StreamLabel StreamLabel *string @@ -276,11 +320,14 @@ type StreamDescription struct { // The shard ID of the item where the operation stopped, inclusive of the previous // result set. Use this value to start a new operation, excluding this value in the - // new request. If LastEvaluatedShardId is empty, then the "last page" of results - // has been processed and there is currently no more data to be retrieved. If - // LastEvaluatedShardId is not empty, it does not necessarily mean that there is - // more data in the result set. The only way to know when you have reached the end - // of the result set is when LastEvaluatedShardId is empty. + // new request. 
+ // + // If LastEvaluatedShardId is empty, then the "last page" of results has been + // processed and there is currently no more data to be retrieved. + // + // If LastEvaluatedShardId is not empty, it does not necessarily mean that there + // is more data in the result set. The only way to know when you have reached the + // end of the result set is when LastEvaluatedShardId is empty. LastEvaluatedShardId *string // The shards that comprise the stream. @@ -289,29 +336,42 @@ type StreamDescription struct { // The Amazon Resource Name (ARN) for the stream. StreamArn *string - // A timestamp, in ISO 8601 format, for this stream. Note that LatestStreamLabel - // is not a unique identifier for the stream, because it is possible that a stream - // from another table might have the same timestamp. However, the combination of - // the following three elements is guaranteed to be unique: + // A timestamp, in ISO 8601 format, for this stream. + // + // Note that LatestStreamLabel is not a unique identifier for the stream, because + // it is possible that a stream from another table might have the same timestamp. + // However, the combination of the following three elements is guaranteed to be + // unique: + // // - the Amazon Web Services customer ID. + // // - the table name + // // - the StreamLabel StreamLabel *string // Indicates the current status of the stream: + // // - ENABLING - Streams is currently being enabled on the DynamoDB table. + // // - ENABLED - the stream is enabled. + // // - DISABLING - Streams is currently being disabled on the DynamoDB table. + // // - DISABLED - the stream is disabled. StreamStatus StreamStatus // Indicates the format of the records within this stream: + // // - KEYS_ONLY - only the key attributes of items that were modified in the // DynamoDB table. + // // - NEW_IMAGE - entire items from the table, as they appeared after they were // modified. + // // - OLD_IMAGE - entire items from the table, as they appeared before they were // modified. + // // - NEW_AND_OLD_IMAGES - both the new and the old images of the items from the // table. StreamViewType StreamViewType @@ -326,9 +386,10 @@ type StreamDescription struct { // DynamoDB table. type StreamRecord struct { - // The approximate date and time when the stream record was created, in UNIX epoch - // time (http://www.epochconverter.com/) format and rounded down to the closest - // second. + // The approximate date and time when the stream record was created, in [UNIX epoch time] format + // and rounded down to the closest second. + // + // [UNIX epoch time]: http://www.epochconverter.com/ ApproximateCreationDateTime *time.Time // The primary key attribute(s) for the DynamoDB item that was modified. @@ -348,9 +409,13 @@ type StreamRecord struct { // The type of data from the modified DynamoDB item that was captured in this // stream record: + // // - KEYS_ONLY - only the key attributes of the modified item. + // // - NEW_IMAGE - the entire item, as it appeared after it was modified. + // // - OLD_IMAGE - the entire item, as it appeared before it was modified. + // // - NEW_AND_OLD_IMAGES - both the new and the old item images of the item. 
StreamViewType StreamViewType diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md index 9cf6cf22b40..2c9b1d6d4f3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.11.3 (2024-06-28) + +* No change notes available for this release. + # v1.11.2 (2024-03-29) * No change notes available for this release. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go index 6339b54191a..b59fb2afcc0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go @@ -3,4 +3,4 @@ package acceptencoding // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.11.2" +const goModuleVersion = "1.11.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/CHANGELOG.md index bb5a864a02f..e95eb7b973a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/CHANGELOG.md @@ -1,3 +1,43 @@ +# v1.9.16 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.15 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.14 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.13 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.12 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.11 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.10 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.9 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.8 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.7 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.9.6 (2024-03-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/go_module_metadata.go index d35a8bfcd2e..1ffc9c7f192 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/go_module_metadata.go @@ -3,4 +3,4 @@ package endpointdiscovery // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.9.6" +const goModuleVersion = "1.9.16" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md index 35c7050dd1b..c03183e1c53 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md 
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md @@ -1,3 +1,43 @@ +# v1.11.17 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.16 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.15 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.14 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.13 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.12 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.11 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.10 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.9 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.8 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.11.7 (2024-03-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go index daf77b5c38c..a21b047962d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go @@ -3,4 +3,4 @@ package presignedurl // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.11.7" +const goModuleVersion = "1.11.17" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/CHANGELOG.md index 84a6a5a8a51..d3321786577 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/CHANGELOG.md @@ -1,3 +1,57 @@ +# v1.29.3 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.2 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.1 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.0 (2024-06-26) + +* **Feature**: Support list-of-string endpoint parameter. + +# v1.28.1 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.0 (2024-06-18) + +* **Feature**: Track usage of various AWS SDK features in user-agent string. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.11 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.10 (2024-06-07) + +* **Bug Fix**: Add clock skew correction on all service clients +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.9 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.8 (2024-05-23) + +* No change notes available for this release. 
+ +# v1.27.7 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.6 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.5 (2024-05-08) + +* **Bug Fix**: GoDoc improvement + # v1.27.4 (2024-03-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_client.go index c968824a106..03101a1bcb5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_client.go @@ -14,13 +14,16 @@ import ( internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" smithydocument "github.com/aws/smithy-go/document" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" "net" "net/http" + "sync/atomic" "time" ) @@ -30,6 +33,9 @@ const ServiceAPIVersion = "2013-12-02" // Client provides the API client to make operations call for Amazon Kinesis. type Client struct { options Options + + // Difference between the time reported by the server and the client + timeOffset *atomic.Int64 } // New returns an initialized Client based on the functional options. Provide @@ -68,6 +74,8 @@ func New(options Options, optFns ...func(*Options)) *Client { options: options, } + initializeTimeOffsetResolver(client) + return client } @@ -231,15 +239,16 @@ func setResolvedDefaultsMode(o *Options) { // NewFromConfig returns a new client from the provided config. 
func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + AccountIDEndpointMode: cfg.AccountIDEndpointMode, } resolveAWSRetryerProvider(cfg, &opts) resolveAWSRetryMaxAttempts(cfg, &opts) @@ -443,6 +452,30 @@ func addContentSHA256Header(stack *middleware.Stack) error { return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) } +func addIsWaiterUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) + return nil + }) +} + +func addIsPaginatorUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) + return nil + }) +} + func addRetry(stack *middleware.Stack, o Options) error { attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { m.LogAttempts = o.ClientLogMode.IsRetries() @@ -486,6 +519,63 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { return nil } +func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { + if mode == aws.AccountIDEndpointModeDisabled { + return nil + } + + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { + return aws.String(ca.Credentials.AccountID) + } + + return nil +} + +func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { + mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} + if err := stack.Build.Add(&mw, middleware.After); err != nil { + return err + } + return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) +} +func initializeTimeOffsetResolver(c *Client) { + c.timeOffset = new(atomic.Int64) +} + +func checkAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) error { + switch mode { + case aws.AccountIDEndpointModeUnset: + case aws.AccountIDEndpointModePreferred: + case aws.AccountIDEndpointModeDisabled: + case aws.AccountIDEndpointModeRequired: + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); !ok { + return fmt.Errorf("accountID is required but not set") + } else if ca.Credentials.AccountID == "" { + return fmt.Errorf("accountID is required but not set") + } + // default check in case invalid mode is configured through request config + default: + return fmt.Errorf("invalid accountID endpoint mode %s, must be preferred/required/disabled", mode) + } + + return nil +} + +func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + switch options.Retryer.(type) { + case *retry.Standard: + 
ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) + case *retry.AdaptiveMode: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) + } + return nil +} + func addRecursionDetection(stack *middleware.Stack) error { return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_AddTagsToStream.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_AddTagsToStream.go index 2f8dd23838f..486523c3950 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_AddTagsToStream.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_AddTagsToStream.go @@ -12,12 +12,16 @@ import ( ) // Adds or updates tags for the specified Kinesis data stream. You can assign up -// to 50 tags to a data stream. When invoking this API, you must use either the -// StreamARN or the StreamName parameter, or both. It is recommended that you use -// the StreamARN input parameter when you invoke this API. If tags have already -// been assigned to the stream, AddTagsToStream overwrites any existing tags that -// correspond to the specified tag keys. AddTagsToStream has a limit of five -// transactions per second per account. +// to 50 tags to a data stream. +// +// When invoking this API, you must use either the StreamARN or the StreamName +// parameter, or both. It is recommended that you use the StreamARN input +// parameter when you invoke this API. +// +// If tags have already been assigned to the stream, AddTagsToStream overwrites +// any existing tags that correspond to the specified tag keys. +// +// AddTagsToStreamhas a limit of five transactions per second per account. func (c *Client) AddTagsToStream(ctx context.Context, params *AddTagsToStreamInput, optFns ...func(*Options)) (*AddTagsToStreamOutput, error) { if params == nil { params = &AddTagsToStreamInput{} @@ -51,6 +55,7 @@ type AddTagsToStreamInput struct { } func (in *AddTagsToStreamInput) bindEndpointParams(p *EndpointParameters) { + p.StreamARN = in.StreamARN p.OperationType = ptr.String("control") } @@ -117,6 +122,12 @@ func (c *Client) addOperationAddTagsToStreamMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpAddTagsToStreamValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_CreateStream.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_CreateStream.go index 4ab7fa0787e..e6225a09284 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_CreateStream.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_CreateStream.go @@ -14,37 +14,47 @@ import ( // Creates a Kinesis data stream. A stream captures and transports data records // that are continuously emitted from different data sources or producers. // Scale-out within a stream is explicitly supported by means of shards, which are -// uniquely identified groups of data records in a stream. You can create your data -// stream using either on-demand or provisioned capacity mode. Data streams with an -// on-demand mode require no capacity planning and automatically scale to handle -// gigabytes of write and read throughput per minute. 
With the on-demand mode, -// Kinesis Data Streams automatically manages the shards in order to provide the -// necessary throughput. For the data streams with a provisioned mode, you must -// specify the number of shards for the data stream. Each shard can support reads -// up to five transactions per second, up to a maximum data read total of 2 MiB per -// second. Each shard can support writes up to 1,000 records per second, up to a -// maximum data write total of 1 MiB per second. If the amount of data input -// increases or decreases, you can add or remove shards. The stream name identifies -// the stream. The name is scoped to the Amazon Web Services account used by the -// application. It is also scoped by Amazon Web Services Region. That is, two -// streams in two different accounts can have the same name, and two streams in the -// same account, but in two different Regions, can have the same name. CreateStream -// is an asynchronous operation. Upon receiving a CreateStream request, Kinesis -// Data Streams immediately returns and sets the stream status to CREATING . After -// the stream is created, Kinesis Data Streams sets the stream status to ACTIVE . -// You should perform read and write operations only on an ACTIVE stream. You -// receive a LimitExceededException when making a CreateStream request when you -// try to do one of the following: +// uniquely identified groups of data records in a stream. +// +// You can create your data stream using either on-demand or provisioned capacity +// mode. Data streams with an on-demand mode require no capacity planning and +// automatically scale to handle gigabytes of write and read throughput per minute. +// With the on-demand mode, Kinesis Data Streams automatically manages the shards +// in order to provide the necessary throughput. For the data streams with a +// provisioned mode, you must specify the number of shards for the data stream. +// Each shard can support reads up to five transactions per second, up to a maximum +// data read total of 2 MiB per second. Each shard can support writes up to 1,000 +// records per second, up to a maximum data write total of 1 MiB per second. If the +// amount of data input increases or decreases, you can add or remove shards. +// +// The stream name identifies the stream. The name is scoped to the Amazon Web +// Services account used by the application. It is also scoped by Amazon Web +// Services Region. That is, two streams in two different accounts can have the +// same name, and two streams in the same account, but in two different Regions, +// can have the same name. +// +// CreateStream is an asynchronous operation. Upon receiving a CreateStream +// request, Kinesis Data Streams immediately returns and sets the stream status to +// CREATING . After the stream is created, Kinesis Data Streams sets the stream +// status to ACTIVE . You should perform read and write operations only on an +// ACTIVE stream. +// +// You receive a LimitExceededException when making a CreateStream request when +// you try to do one of the following: +// // - Have more than five streams in the CREATING state at any point in time. +// // - Create more shards than are authorized for your account. // -// For the default shard limit for an Amazon Web Services account, see Amazon -// Kinesis Data Streams Limits (https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html) -// in the Amazon Kinesis Data Streams Developer Guide. 
To increase this limit, -// contact Amazon Web Services Support (https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html) -// . You can use DescribeStreamSummary to check the stream status, which is -// returned in StreamStatus . CreateStream has a limit of five transactions per -// second per account. +// For the default shard limit for an Amazon Web Services account, see [Amazon Kinesis Data Streams Limits] in the +// Amazon Kinesis Data Streams Developer Guide. To increase this limit, [contact Amazon Web Services Support]. +// +// You can use DescribeStreamSummary to check the stream status, which is returned in StreamStatus . +// +// CreateStreamhas a limit of five transactions per second per account. +// +// [contact Amazon Web Services Support]: https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html +// [Amazon Kinesis Data Streams Limits]: https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html func (c *Client) CreateStream(ctx context.Context, params *CreateStreamInput, optFns ...func(*Options)) (*CreateStreamOutput, error) { if params == nil { params = &CreateStreamInput{} @@ -78,7 +88,7 @@ type CreateStreamInput struct { // provisioned throughput. ShardCount *int32 - // Indicates the capacity mode of the data stream. Currently, in Kinesis Data + // Indicates the capacity mode of the data stream. Currently, in Kinesis Data // Streams, you can choose between an on-demand capacity mode and a provisioned // capacity mode for your data streams. StreamModeDetails *types.StreamModeDetails @@ -148,6 +158,12 @@ func (c *Client) addOperationCreateStreamMiddlewares(stack *middleware.Stack, op if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpCreateStreamValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DecreaseStreamRetentionPeriod.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DecreaseStreamRetentionPeriod.go index 79eabcc451c..d421520cde8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DecreaseStreamRetentionPeriod.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DecreaseStreamRetentionPeriod.go @@ -13,12 +13,15 @@ import ( // Decreases the Kinesis data stream's retention period, which is the length of // time data records are accessible after they are added to the stream. The minimum -// value of a stream's retention period is 24 hours. When invoking this API, you -// must use either the StreamARN or the StreamName parameter, or both. It is -// recommended that you use the StreamARN input parameter when you invoke this -// API. This operation may result in lost data. For example, if the stream's -// retention period is 48 hours and is decreased to 24 hours, any data already in -// the stream that is older than 24 hours is inaccessible. +// value of a stream's retention period is 24 hours. +// +// When invoking this API, you must use either the StreamARN or the StreamName +// parameter, or both. It is recommended that you use the StreamARN input +// parameter when you invoke this API. +// +// This operation may result in lost data. For example, if the stream's retention +// period is 48 hours and is decreased to 24 hours, any data already in the stream +// that is older than 24 hours is inaccessible. 
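The api_client.go hunks earlier in this patch copy cfg.AccountIDEndpointMode into the Kinesis client options and add addUserAgentRetryMode, which records whether the standard or adaptive retryer is configured. A hedged sketch of overriding both per client; it assumes only the option fields visible in this diff plus the retry.NewAdaptiveMode constructor from the SDK's retry package:

```go
package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/retry"
	"github.com/aws/aws-sdk-go-v2/service/kinesis"
)

// newTunedClient opts out of account-ID based endpoint routing and switches to
// the adaptive retryer, which the new addUserAgentRetryMode middleware reports
// as the "adaptive" retry mode in the User-Agent header.
func newTunedClient(cfg aws.Config) *kinesis.Client {
	return kinesis.NewFromConfig(cfg, func(o *kinesis.Options) {
		o.AccountIDEndpointMode = aws.AccountIDEndpointModeDisabled
		o.Retryer = retry.NewAdaptiveMode()
	})
}
```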
func (c *Client) DecreaseStreamRetentionPeriod(ctx context.Context, params *DecreaseStreamRetentionPeriodInput, optFns ...func(*Options)) (*DecreaseStreamRetentionPeriodOutput, error) { if params == nil { params = &DecreaseStreamRetentionPeriodInput{} @@ -34,7 +37,7 @@ func (c *Client) DecreaseStreamRetentionPeriod(ctx context.Context, params *Decr return out, nil } -// Represents the input for DecreaseStreamRetentionPeriod . +// Represents the input for DecreaseStreamRetentionPeriod. type DecreaseStreamRetentionPeriodInput struct { // The new retention period of the stream, in hours. Must be less than the current @@ -53,6 +56,7 @@ type DecreaseStreamRetentionPeriodInput struct { } func (in *DecreaseStreamRetentionPeriodInput) bindEndpointParams(p *EndpointParameters) { + p.StreamARN = in.StreamARN p.OperationType = ptr.String("control") } @@ -119,6 +123,12 @@ func (c *Client) addOperationDecreaseStreamRetentionPeriodMiddlewares(stack *mid if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDecreaseStreamRetentionPeriodValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DeleteResourcePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DeleteResourcePolicy.go index 8109cdb53b6..8ebb05c470a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DeleteResourcePolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DeleteResourcePolicy.go @@ -13,7 +13,9 @@ import ( // Delete a policy for the specified data stream or consumer. Request patterns can // be one of the following: +// // - Data stream pattern: arn:aws.*:kinesis:.*:\d{12}:.*stream/\S+ +// // - Consumer pattern: // ^(arn):aws.*:kinesis:.*:\d{12}:.*stream\/[a-zA-Z0-9_.-]+\/consumer\/[a-zA-Z0-9_.-]+:[0-9]+ func (c *Client) DeleteResourcePolicy(ctx context.Context, params *DeleteResourcePolicyInput, optFns ...func(*Options)) (*DeleteResourcePolicyOutput, error) { @@ -42,6 +44,7 @@ type DeleteResourcePolicyInput struct { } func (in *DeleteResourcePolicyInput) bindEndpointParams(p *EndpointParameters) { + p.ResourceARN = in.ResourceARN p.OperationType = ptr.String("control") } @@ -108,6 +111,12 @@ func (c *Client) addOperationDeleteResourcePolicyMiddlewares(stack *middleware.S if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDeleteResourcePolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DeleteStream.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DeleteStream.go index b672f1eb764..59546447005 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DeleteStream.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DeleteStream.go @@ -14,18 +14,27 @@ import ( // Deletes a Kinesis data stream and all its shards and data. You must shut down // any applications that are operating on the stream before you delete the stream. // If an application attempts to operate on a deleted stream, it receives the -// exception ResourceNotFoundException . 
When invoking this API, you must use -// either the StreamARN or the StreamName parameter, or both. It is recommended -// that you use the StreamARN input parameter when you invoke this API. If the -// stream is in the ACTIVE state, you can delete it. After a DeleteStream request, -// the specified stream is in the DELETING state until Kinesis Data Streams -// completes the deletion. Note: Kinesis Data Streams might continue to accept data -// read and write operations, such as PutRecord , PutRecords , and GetRecords , on -// a stream in the DELETING state until the stream deletion is complete. When you -// delete a stream, any shards in that stream are also deleted, and any tags are -// dissociated from the stream. You can use the DescribeStreamSummary operation to -// check the state of the stream, which is returned in StreamStatus . DeleteStream -// has a limit of five transactions per second per account. +// exception ResourceNotFoundException . +// +// When invoking this API, you must use either the StreamARN or the StreamName +// parameter, or both. It is recommended that you use the StreamARN input +// parameter when you invoke this API. +// +// If the stream is in the ACTIVE state, you can delete it. After a DeleteStream +// request, the specified stream is in the DELETING state until Kinesis Data +// Streams completes the deletion. +// +// Note: Kinesis Data Streams might continue to accept data read and write +// operations, such as PutRecord, PutRecords, and GetRecords, on a stream in the DELETING state until the +// stream deletion is complete. +// +// When you delete a stream, any shards in that stream are also deleted, and any +// tags are dissociated from the stream. +// +// You can use the DescribeStreamSummary operation to check the state of the stream, which is returned +// in StreamStatus . +// +// DeleteStreamhas a limit of five transactions per second per account. func (c *Client) DeleteStream(ctx context.Context, params *DeleteStreamInput, optFns ...func(*Options)) (*DeleteStreamOutput, error) { if params == nil { params = &DeleteStreamInput{} @@ -41,7 +50,7 @@ func (c *Client) DeleteStream(ctx context.Context, params *DeleteStreamInput, op return out, nil } -// Represents the input for DeleteStream . +// Represents the input for DeleteStream. 
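The CreateStream, DescribeStreamSummary, and DeleteStream descriptions above outline the stream lifecycle: creation is asynchronous, the status moves from CREATING to ACTIVE, and deletion parks the stream in DELETING. A hedged sketch of that lifecycle, assuming the usual generated shapes (StreamDescriptionSummary.StreamStatus and the types.StreamStatusActive constant):

```go
package example

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kinesis"
	"github.com/aws/aws-sdk-go-v2/service/kinesis/types"
)

// createAndDelete creates a provisioned stream, polls DescribeStreamSummary
// until StreamStatus is ACTIVE, then deletes the stream again.
func createAndDelete(ctx context.Context, client *kinesis.Client, name string) error {
	if _, err := client.CreateStream(ctx, &kinesis.CreateStreamInput{
		StreamName: aws.String(name),
		ShardCount: aws.Int32(1), // provisioned mode; use StreamModeDetails for on-demand
	}); err != nil {
		return err
	}

	for {
		out, err := client.DescribeStreamSummary(ctx, &kinesis.DescribeStreamSummaryInput{
			StreamName: aws.String(name),
		})
		if err != nil {
			return err
		}
		if out.StreamDescriptionSummary.StreamStatus == types.StreamStatusActive {
			break
		}
		fmt.Println("stream status:", out.StreamDescriptionSummary.StreamStatus)
		time.Sleep(5 * time.Second) // stay well under the 20 TPS DescribeStreamSummary limit
	}

	_, err := client.DeleteStream(ctx, &kinesis.DeleteStreamInput{StreamName: aws.String(name)})
	return err
}
```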
type DeleteStreamInput struct { // If this parameter is unset ( null ) or if you set it to false , and the stream @@ -59,6 +68,7 @@ type DeleteStreamInput struct { } func (in *DeleteStreamInput) bindEndpointParams(p *EndpointParameters) { + p.StreamARN = in.StreamARN p.OperationType = ptr.String("control") } @@ -125,6 +135,12 @@ func (c *Client) addOperationDeleteStreamMiddlewares(stack *middleware.Stack, op if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteStream(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DeregisterStreamConsumer.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DeregisterStreamConsumer.go index 6f20829f1e9..6aaf4834cfc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DeregisterStreamConsumer.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DeregisterStreamConsumer.go @@ -15,10 +15,11 @@ import ( // ARN of the data stream and the name you gave the consumer when you registered // it. You may also provide all three parameters, as long as they don't conflict // with each other. If you don't know the name or ARN of the consumer that you want -// to deregister, you can use the ListStreamConsumers operation to get a list of -// the descriptions of all the consumers that are currently registered with a given -// data stream. The description of a consumer contains its name and ARN. This -// operation has a limit of five transactions per second per stream. +// to deregister, you can use the ListStreamConsumersoperation to get a list of the descriptions of +// all the consumers that are currently registered with a given data stream. The +// description of a consumer contains its name and ARN. +// +// This operation has a limit of five transactions per second per stream. func (c *Client) DeregisterStreamConsumer(ctx context.Context, params *DeregisterStreamConsumerInput, optFns ...func(*Options)) (*DeregisterStreamConsumerOutput, error) { if params == nil { params = &DeregisterStreamConsumerInput{} @@ -47,15 +48,16 @@ type DeregisterStreamConsumerInput struct { ConsumerName *string // The ARN of the Kinesis data stream that the consumer is registered with. For - // more information, see Amazon Resource Names (ARNs) and Amazon Web Services - // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kinesis-streams) - // . + // more information, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]. 
+ // + // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kinesis-streams StreamARN *string noSmithyDocumentSerde } func (in *DeregisterStreamConsumerInput) bindEndpointParams(p *EndpointParameters) { + p.StreamARN = in.StreamARN p.ConsumerARN = in.ConsumerARN p.OperationType = ptr.String("control") @@ -123,6 +125,12 @@ func (c *Client) addOperationDeregisterStreamConsumerMiddlewares(stack *middlewa if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeregisterStreamConsumer(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DescribeLimits.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DescribeLimits.go index 3e2d9485c68..c1400f25c72 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DescribeLimits.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DescribeLimits.go @@ -10,9 +10,12 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Describes the shard limits and usage for the account. If you update your -// account limits, the old limits might be returned for a few minutes. This -// operation has a limit of one transaction per second per account. +// Describes the shard limits and usage for the account. +// +// If you update your account limits, the old limits might be returned for a few +// minutes. +// +// This operation has a limit of one transaction per second per account. func (c *Client) DescribeLimits(ctx context.Context, params *DescribeLimitsInput, optFns ...func(*Options)) (*DescribeLimitsOutput, error) { if params == nil { params = &DescribeLimitsInput{} @@ -34,12 +37,12 @@ type DescribeLimitsInput struct { type DescribeLimitsOutput struct { - // Indicates the number of data streams with the on-demand capacity mode. + // Indicates the number of data streams with the on-demand capacity mode. // // This member is required. OnDemandStreamCount *int32 - // The maximum number of data streams with the on-demand capacity mode. + // The maximum number of data streams with the on-demand capacity mode. // // This member is required. 
OnDemandStreamCountLimit *int32 @@ -115,6 +118,12 @@ func (c *Client) addOperationDescribeLimitsMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeLimits(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DescribeStream.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DescribeStream.go index 7b0817028b7..b7b8ea41c03 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DescribeStream.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DescribeStream.go @@ -13,30 +13,39 @@ import ( smithytime "github.com/aws/smithy-go/time" smithyhttp "github.com/aws/smithy-go/transport/http" smithywaiter "github.com/aws/smithy-go/waiter" - "github.com/jmespath/go-jmespath" + jmespath "github.com/jmespath/go-jmespath" "time" ) -// Describes the specified Kinesis data stream. This API has been revised. It's -// highly recommended that you use the DescribeStreamSummary API to get a -// summarized description of the specified Kinesis data stream and the ListShards -// API to list the shards in a specified data stream and obtain information about -// each shard. When invoking this API, you must use either the StreamARN or the -// StreamName parameter, or both. It is recommended that you use the StreamARN -// input parameter when you invoke this API. The information returned includes the -// stream name, Amazon Resource Name (ARN), creation time, enhanced metric -// configuration, and shard map. The shard map is an array of shard objects. For -// each shard object, there is the hash key and sequence number ranges that the -// shard spans, and the IDs of any earlier shards that played in a role in creating -// the shard. Every record ingested in the stream is identified by a sequence -// number, which is assigned when the record is put into the stream. You can limit -// the number of shards returned by each call. For more information, see -// Retrieving Shards from a Stream (https://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-retrieve-shards.html) -// in the Amazon Kinesis Data Streams Developer Guide. There are no guarantees -// about the chronological order shards returned. To process shards in -// chronological order, use the ID of the parent shard to track the lineage to the -// oldest shard. This operation has a limit of 10 transactions per second per -// account. +// Describes the specified Kinesis data stream. +// +// This API has been revised. It's highly recommended that you use the DescribeStreamSummary API to get +// a summarized description of the specified Kinesis data stream and the ListShardsAPI to +// list the shards in a specified data stream and obtain information about each +// shard. +// +// When invoking this API, you must use either the StreamARN or the StreamName +// parameter, or both. It is recommended that you use the StreamARN input +// parameter when you invoke this API. +// +// The information returned includes the stream name, Amazon Resource Name (ARN), +// creation time, enhanced metric configuration, and shard map. The shard map is an +// array of shard objects. 
For each shard object, there is the hash key and +// sequence number ranges that the shard spans, and the IDs of any earlier shards +// that played in a role in creating the shard. Every record ingested in the stream +// is identified by a sequence number, which is assigned when the record is put +// into the stream. +// +// You can limit the number of shards returned by each call. For more information, +// see [Retrieving Shards from a Stream]in the Amazon Kinesis Data Streams Developer Guide. +// +// There are no guarantees about the chronological order shards returned. To +// process shards in chronological order, use the ID of the parent shard to track +// the lineage to the oldest shard. +// +// This operation has a limit of 10 transactions per second per account. +// +// [Retrieving Shards from a Stream]: https://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-retrieve-shards.html func (c *Client) DescribeStream(ctx context.Context, params *DescribeStreamInput, optFns ...func(*Options)) (*DescribeStreamOutput, error) { if params == nil { params = &DescribeStreamInput{} @@ -55,11 +64,13 @@ func (c *Client) DescribeStream(ctx context.Context, params *DescribeStreamInput // Represents the input for DescribeStream . type DescribeStreamInput struct { - // The shard ID of the shard to start with. Specify this parameter to indicate - // that you want to describe the stream starting with the shard whose ID - // immediately follows ExclusiveStartShardId . If you don't specify this parameter, - // the default behavior for DescribeStream is to describe the stream starting with - // the first shard in the stream. + // The shard ID of the shard to start with. + // + // Specify this parameter to indicate that you want to describe the stream + // starting with the shard whose ID immediately follows ExclusiveStartShardId . + // + // If you don't specify this parameter, the default behavior for DescribeStream is + // to describe the stream starting with the first shard in the stream. ExclusiveStartShardId *string // The maximum number of shards to return in a single call. The default value is @@ -76,6 +87,7 @@ type DescribeStreamInput struct { } func (in *DescribeStreamInput) bindEndpointParams(p *EndpointParameters) { + p.StreamARN = in.StreamARN p.OperationType = ptr.String("control") } @@ -151,6 +163,12 @@ func (c *Client) addOperationDescribeStreamMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeStream(options.Region), middleware.Before); err != nil { return err } @@ -172,14 +190,6 @@ func (c *Client) addOperationDescribeStreamMiddlewares(stack *middleware.Stack, return nil } -// DescribeStreamAPIClient is a client that implements the DescribeStream -// operation. -type DescribeStreamAPIClient interface { - DescribeStream(context.Context, *DescribeStreamInput, ...func(*Options)) (*DescribeStreamOutput, error) -} - -var _ DescribeStreamAPIClient = (*Client)(nil) - // StreamExistsWaiterOptions are waiter options for StreamExistsWaiter type StreamExistsWaiterOptions struct { @@ -212,12 +222,13 @@ type StreamExistsWaiterOptions struct { // Retryable is function that can be used to override the service defined // waiter-behavior based on operation output, or returned error. 
This function is - // used by the waiter to decide if a state is retryable or a terminal state. By - // default service-modeled logic will populate this option. This option can thus be - // used to define a custom waiter state with fall-back to service-modeled waiter - // state mutators.The function returns an error in case of a failure state. In case - // of retry state, this function returns a bool value of true and nil error, while - // in case of success it returns a bool value of false and nil error. + // used by the waiter to decide if a state is retryable or a terminal state. + // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. + // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. Retryable func(context.Context, *DescribeStreamInput, *DescribeStreamOutput, error) (bool, error) } @@ -293,7 +304,13 @@ func (w *StreamExistsWaiter) WaitForOutput(ctx context.Context, params *Describe } out, err := w.client.DescribeStream(ctx, params, func(o *Options) { + baseOpts := []func(*Options){ + addIsWaiterUserAgent, + } o.APIOptions = append(o.APIOptions, apiOptions...) + for _, opt := range baseOpts { + opt(o) + } for _, opt := range options.ClientOptions { opt(o) } @@ -383,12 +400,13 @@ type StreamNotExistsWaiterOptions struct { // Retryable is function that can be used to override the service defined // waiter-behavior based on operation output, or returned error. This function is - // used by the waiter to decide if a state is retryable or a terminal state. By - // default service-modeled logic will populate this option. This option can thus be - // used to define a custom waiter state with fall-back to service-modeled waiter - // state mutators.The function returns an error in case of a failure state. In case - // of retry state, this function returns a bool value of true and nil error, while - // in case of success it returns a bool value of false and nil error. + // used by the waiter to decide if a state is retryable or a terminal state. + // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. + // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. Retryable func(context.Context, *DescribeStreamInput, *DescribeStreamOutput, error) (bool, error) } @@ -465,7 +483,13 @@ func (w *StreamNotExistsWaiter) WaitForOutput(ctx context.Context, params *Descr } out, err := w.client.DescribeStream(ctx, params, func(o *Options) { + baseOpts := []func(*Options){ + addIsWaiterUserAgent, + } o.APIOptions = append(o.APIOptions, apiOptions...) + for _, opt := range baseOpts { + opt(o) + } for _, opt := range options.ClientOptions { opt(o) } @@ -513,6 +537,14 @@ func streamNotExistsStateRetryable(ctx context.Context, input *DescribeStreamInp return true, nil } +// DescribeStreamAPIClient is a client that implements the DescribeStream +// operation. 
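The StreamExistsWaiter shown above now injects the waiter User-Agent feature through addIsWaiterUserAgent before each DescribeStream attempt. A hedged usage sketch; the NewStreamExistsWaiter constructor and the MinDelay option are assumed to follow the SDK's usual generated waiter shape rather than being spelled out in this diff:

```go
package example

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kinesis"
)

// waitForStream blocks until DescribeStream reports the stream exists or
// maxWait elapses; each attempt is tagged with the waiter User-Agent feature.
func waitForStream(ctx context.Context, client *kinesis.Client, name string, maxWait time.Duration) error {
	waiter := kinesis.NewStreamExistsWaiter(client, func(o *kinesis.StreamExistsWaiterOptions) {
		o.MinDelay = 2 * time.Second // assumed option, overridden only for illustration
	})
	return waiter.Wait(ctx, &kinesis.DescribeStreamInput{
		StreamName: aws.String(name),
	}, maxWait)
}
```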
+type DescribeStreamAPIClient interface { + DescribeStream(context.Context, *DescribeStreamInput, ...func(*Options)) (*DescribeStreamOutput, error) +} + +var _ DescribeStreamAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opDescribeStream(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DescribeStreamConsumer.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DescribeStreamConsumer.go index b5bc8d09f54..384b0b00104 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DescribeStreamConsumer.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DescribeStreamConsumer.go @@ -16,12 +16,14 @@ import ( // consumer. Alternatively, you can provide the ARN of the data stream and the name // you gave the consumer when you registered it. You may also provide all three // parameters, as long as they don't conflict with each other. If you don't know -// the name or ARN of the consumer that you want to describe, you can use the -// ListStreamConsumers operation to get a list of the descriptions of all the -// consumers that are currently registered with a given data stream. This operation -// has a limit of 20 transactions per second per stream. When making a -// cross-account call with DescribeStreamConsumer , make sure to provide the ARN of -// the consumer. +// the name or ARN of the consumer that you want to describe, you can use the ListStreamConsumers +// operation to get a list of the descriptions of all the consumers that are +// currently registered with a given data stream. +// +// This operation has a limit of 20 transactions per second per stream. +// +// When making a cross-account call with DescribeStreamConsumer , make sure to +// provide the ARN of the consumer. func (c *Client) DescribeStreamConsumer(ctx context.Context, params *DescribeStreamConsumerInput, optFns ...func(*Options)) (*DescribeStreamConsumerOutput, error) { if params == nil { params = &DescribeStreamConsumerInput{} @@ -46,15 +48,16 @@ type DescribeStreamConsumerInput struct { ConsumerName *string // The ARN of the Kinesis data stream that the consumer is registered with. For - // more information, see Amazon Resource Names (ARNs) and Amazon Web Services - // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kinesis-streams) - // . + // more information, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]. 
+ // + // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kinesis-streams StreamARN *string noSmithyDocumentSerde } func (in *DescribeStreamConsumerInput) bindEndpointParams(p *EndpointParameters) { + p.StreamARN = in.StreamARN p.ConsumerARN = in.ConsumerARN p.OperationType = ptr.String("control") @@ -128,6 +131,12 @@ func (c *Client) addOperationDescribeStreamConsumerMiddlewares(stack *middleware if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeStreamConsumer(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DescribeStreamSummary.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DescribeStreamSummary.go index 76e93fb4ce6..ae9bb977f73 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DescribeStreamSummary.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DescribeStreamSummary.go @@ -13,12 +13,17 @@ import ( ) // Provides a summarized description of the specified Kinesis data stream without -// the shard list. When invoking this API, you must use either the StreamARN or -// the StreamName parameter, or both. It is recommended that you use the StreamARN -// input parameter when you invoke this API. The information returned includes the -// stream name, Amazon Resource Name (ARN), status, record retention period, -// approximate creation time, monitoring, encryption details, and open shard count. -// DescribeStreamSummary has a limit of 20 transactions per second per account. +// the shard list. +// +// When invoking this API, you must use either the StreamARN or the StreamName +// parameter, or both. It is recommended that you use the StreamARN input +// parameter when you invoke this API. +// +// The information returned includes the stream name, Amazon Resource Name (ARN), +// status, record retention period, approximate creation time, monitoring, +// encryption details, and open shard count. +// +// DescribeStreamSummaryhas a limit of 20 transactions per second per account. func (c *Client) DescribeStreamSummary(ctx context.Context, params *DescribeStreamSummaryInput, optFns ...func(*Options)) (*DescribeStreamSummaryOutput, error) { if params == nil { params = &DescribeStreamSummaryInput{} @@ -46,6 +51,7 @@ type DescribeStreamSummaryInput struct { } func (in *DescribeStreamSummaryInput) bindEndpointParams(p *EndpointParameters) { + p.StreamARN = in.StreamARN p.OperationType = ptr.String("control") } @@ -118,6 +124,12 @@ func (c *Client) addOperationDescribeStreamSummaryMiddlewares(stack *middleware. 
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeStreamSummary(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DisableEnhancedMonitoring.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DisableEnhancedMonitoring.go index 8a78d53ef5a..4d3c8a3cd72 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DisableEnhancedMonitoring.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DisableEnhancedMonitoring.go @@ -12,9 +12,11 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Disables enhanced monitoring. When invoking this API, you must use either the -// StreamARN or the StreamName parameter, or both. It is recommended that you use -// the StreamARN input parameter when you invoke this API. +// Disables enhanced monitoring. +// +// When invoking this API, you must use either the StreamARN or the StreamName +// parameter, or both. It is recommended that you use the StreamARN input +// parameter when you invoke this API. func (c *Client) DisableEnhancedMonitoring(ctx context.Context, params *DisableEnhancedMonitoringInput, optFns ...func(*Options)) (*DisableEnhancedMonitoringOutput, error) { if params == nil { params = &DisableEnhancedMonitoringInput{} @@ -30,22 +32,33 @@ func (c *Client) DisableEnhancedMonitoring(ctx context.Context, params *DisableE return out, nil } -// Represents the input for DisableEnhancedMonitoring . +// Represents the input for DisableEnhancedMonitoring. type DisableEnhancedMonitoringInput struct { - // List of shard-level metrics to disable. The following are the valid shard-level - // metrics. The value " ALL " disables every metric. + // List of shard-level metrics to disable. + // + // The following are the valid shard-level metrics. The value " ALL " disables + // every metric. + // // - IncomingBytes + // // - IncomingRecords + // // - OutgoingBytes + // // - OutgoingRecords + // // - WriteProvisionedThroughputExceeded + // // - ReadProvisionedThroughputExceeded + // // - IteratorAgeMilliseconds + // // - ALL - // For more information, see Monitoring the Amazon Kinesis Data Streams Service - // with Amazon CloudWatch (https://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html) - // in the Amazon Kinesis Data Streams Developer Guide. + // + // For more information, see [Monitoring the Amazon Kinesis Data Streams Service with Amazon CloudWatch] in the Amazon Kinesis Data Streams Developer Guide. + // + // [Monitoring the Amazon Kinesis Data Streams Service with Amazon CloudWatch]: https://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html // // This member is required. ShardLevelMetrics []types.MetricsName @@ -60,12 +73,12 @@ type DisableEnhancedMonitoringInput struct { } func (in *DisableEnhancedMonitoringInput) bindEndpointParams(p *EndpointParameters) { + p.StreamARN = in.StreamARN p.OperationType = ptr.String("control") } -// Represents the output for EnableEnhancedMonitoring and DisableEnhancedMonitoring -// . +// Represents the output for EnableEnhancedMonitoring and DisableEnhancedMonitoring. 
type DisableEnhancedMonitoringOutput struct { // Represents the current state of the metrics that are in the enhanced state @@ -143,6 +156,12 @@ func (c *Client) addOperationDisableEnhancedMonitoringMiddlewares(stack *middlew if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDisableEnhancedMonitoringValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_EnableEnhancedMonitoring.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_EnableEnhancedMonitoring.go index bd7c977503f..3dc3d780c93 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_EnableEnhancedMonitoring.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_EnableEnhancedMonitoring.go @@ -12,8 +12,9 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Enables enhanced Kinesis data stream monitoring for shard-level metrics. When -// invoking this API, you must use either the StreamARN or the StreamName +// Enables enhanced Kinesis data stream monitoring for shard-level metrics. +// +// When invoking this API, you must use either the StreamARN or the StreamName // parameter, or both. It is recommended that you use the StreamARN input // parameter when you invoke this API. func (c *Client) EnableEnhancedMonitoring(ctx context.Context, params *EnableEnhancedMonitoringInput, optFns ...func(*Options)) (*EnableEnhancedMonitoringOutput, error) { @@ -31,22 +32,33 @@ func (c *Client) EnableEnhancedMonitoring(ctx context.Context, params *EnableEnh return out, nil } -// Represents the input for EnableEnhancedMonitoring . +// Represents the input for EnableEnhancedMonitoring. type EnableEnhancedMonitoringInput struct { - // List of shard-level metrics to enable. The following are the valid shard-level - // metrics. The value " ALL " enables every metric. + // List of shard-level metrics to enable. + // + // The following are the valid shard-level metrics. The value " ALL " enables every + // metric. + // // - IncomingBytes + // // - IncomingRecords + // // - OutgoingBytes + // // - OutgoingRecords + // // - WriteProvisionedThroughputExceeded + // // - ReadProvisionedThroughputExceeded + // // - IteratorAgeMilliseconds + // // - ALL - // For more information, see Monitoring the Amazon Kinesis Data Streams Service - // with Amazon CloudWatch (https://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html) - // in the Amazon Kinesis Data Streams Developer Guide. + // + // For more information, see [Monitoring the Amazon Kinesis Data Streams Service with Amazon CloudWatch] in the Amazon Kinesis Data Streams Developer Guide. + // + // [Monitoring the Amazon Kinesis Data Streams Service with Amazon CloudWatch]: https://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html // // This member is required. ShardLevelMetrics []types.MetricsName @@ -61,12 +73,12 @@ type EnableEnhancedMonitoringInput struct { } func (in *EnableEnhancedMonitoringInput) bindEndpointParams(p *EndpointParameters) { + p.StreamARN = in.StreamARN p.OperationType = ptr.String("control") } -// Represents the output for EnableEnhancedMonitoring and DisableEnhancedMonitoring -// . +// Represents the output for EnableEnhancedMonitoring and DisableEnhancedMonitoring. 
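Given the shard-level metric list documented above, enabling a subset for one stream looks roughly like the sketch below; the types.MetricsName constant names are assumed from the SDK's usual enum naming and are not spelled out in this diff:

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kinesis"
	"github.com/aws/aws-sdk-go-v2/service/kinesis/types"
)

// enableShardMetrics turns on two of the documented shard-level metrics,
// addressing the stream by ARN as the doc comments above recommend.
func enableShardMetrics(ctx context.Context, client *kinesis.Client, streamARN string) error {
	_, err := client.EnableEnhancedMonitoring(ctx, &kinesis.EnableEnhancedMonitoringInput{
		StreamARN: aws.String(streamARN),
		ShardLevelMetrics: []types.MetricsName{
			types.MetricsNameIncomingBytes,
			types.MetricsNameIteratorAgeMilliseconds,
		},
	})
	return err
}
```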
type EnableEnhancedMonitoringOutput struct { // Represents the current state of the metrics that are in the enhanced state @@ -144,6 +156,12 @@ func (c *Client) addOperationEnableEnhancedMonitoringMiddlewares(stack *middlewa if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpEnableEnhancedMonitoringValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_GetRecords.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_GetRecords.go index 7cb5599764f..2a3bd737e2e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_GetRecords.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_GetRecords.go @@ -14,57 +14,69 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Gets data records from a Kinesis data stream's shard. When invoking this API, -// you must use either the StreamARN or the StreamName parameter, or both. It is -// recommended that you use the StreamARN input parameter when you invoke this -// API. Specify a shard iterator using the ShardIterator parameter. The shard -// iterator specifies the position in the shard from which you want to start -// reading data records sequentially. If there are no records available in the -// portion of the shard that the iterator points to, GetRecords returns an empty -// list. It might take multiple calls to get to a portion of the shard that -// contains records. You can scale by provisioning multiple shards per stream while -// considering service limits (for more information, see Amazon Kinesis Data -// Streams Limits (https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html) -// in the Amazon Kinesis Data Streams Developer Guide). Your application should -// have one thread per shard, each reading continuously from its stream. To read -// from a stream continually, call GetRecords in a loop. Use GetShardIterator to -// get the shard iterator to specify in the first GetRecords call. GetRecords +// Gets data records from a Kinesis data stream's shard. +// +// When invoking this API, you must use either the StreamARN or the StreamName +// parameter, or both. It is recommended that you use the StreamARN input +// parameter when you invoke this API. +// +// Specify a shard iterator using the ShardIterator parameter. The shard iterator +// specifies the position in the shard from which you want to start reading data +// records sequentially. If there are no records available in the portion of the +// shard that the iterator points to, GetRecordsreturns an empty list. It might take +// multiple calls to get to a portion of the shard that contains records. +// +// You can scale by provisioning multiple shards per stream while considering +// service limits (for more information, see [Amazon Kinesis Data Streams Limits]in the Amazon Kinesis Data Streams +// Developer Guide). Your application should have one thread per shard, each +// reading continuously from its stream. To read from a stream continually, call GetRecords +// in a loop. Use GetShardIteratorto get the shard iterator to specify in the first GetRecords call. GetRecords // returns a new shard iterator in NextShardIterator . Specify the shard iterator -// returned in NextShardIterator in subsequent calls to GetRecords . 
If the shard -// has been closed, the shard iterator can't return more data and GetRecords -// returns null in NextShardIterator . You can terminate the loop when the shard is -// closed, or when the shard iterator reaches the record with the sequence number -// or other attribute that marks it as the last record to process. Each data record -// can be up to 1 MiB in size, and each shard can read up to 2 MiB per second. You -// can ensure that your calls don't exceed the maximum supported size or throughput -// by using the Limit parameter to specify the maximum number of records that -// GetRecords can return. Consider your average record size when determining this -// limit. The maximum number of records that can be returned per call is 10,000. -// The size of the data returned by GetRecords varies depending on the utilization -// of the shard. It is recommended that consumer applications retrieve records via -// the GetRecords command using the 5 TPS limit to remain caught up. Retrieving -// records less frequently can lead to consumer applications falling behind. The -// maximum size of data that GetRecords can return is 10 MiB. If a call returns -// this amount of data, subsequent calls made within the next 5 seconds throw +// returned in NextShardIterator in subsequent calls to GetRecords. If the shard has been +// closed, the shard iterator can't return more data and GetRecordsreturns null in +// NextShardIterator . You can terminate the loop when the shard is closed, or when +// the shard iterator reaches the record with the sequence number or other +// attribute that marks it as the last record to process. +// +// Each data record can be up to 1 MiB in size, and each shard can read up to 2 +// MiB per second. You can ensure that your calls don't exceed the maximum +// supported size or throughput by using the Limit parameter to specify the +// maximum number of records that GetRecordscan return. Consider your average record size +// when determining this limit. The maximum number of records that can be returned +// per call is 10,000. +// +// The size of the data returned by GetRecords varies depending on the utilization of the +// shard. It is recommended that consumer applications retrieve records via the +// GetRecords command using the 5 TPS limit to remain caught up. Retrieving records +// less frequently can lead to consumer applications falling behind. The maximum +// size of data that GetRecordscan return is 10 MiB. If a call returns this amount of data, +// subsequent calls made within the next 5 seconds throw // ProvisionedThroughputExceededException . If there is insufficient provisioned // throughput on the stream, subsequent calls made within the next 1 second throw -// ProvisionedThroughputExceededException . GetRecords doesn't return any data -// when it throws an exception. For this reason, we recommend that you wait 1 -// second between calls to GetRecords . However, it's possible that the application -// will get exceptions for longer than 1 second. To detect whether the application -// is falling behind in processing, you can use the MillisBehindLatest response -// attribute. You can also monitor the stream using CloudWatch metrics and other -// mechanisms (see Monitoring (https://docs.aws.amazon.com/kinesis/latest/dev/monitoring.html) -// in the Amazon Kinesis Data Streams Developer Guide). Each Amazon Kinesis record -// includes a value, ApproximateArrivalTimestamp , that is set when a stream -// successfully receives and stores a record. 
This is commonly referred to as a -// server-side time stamp, whereas a client-side time stamp is set when a data -// producer creates or sends the record to a stream (a data producer is any data -// source putting data records into a stream, for example with PutRecords ). The +// ProvisionedThroughputExceededException . GetRecords doesn't return any data when it +// throws an exception. For this reason, we recommend that you wait 1 second +// between calls to GetRecords. However, it's possible that the application will get +// exceptions for longer than 1 second. +// +// To detect whether the application is falling behind in processing, you can use +// the MillisBehindLatest response attribute. You can also monitor the stream +// using CloudWatch metrics and other mechanisms (see [Monitoring]in the Amazon Kinesis Data +// Streams Developer Guide). +// +// Each Amazon Kinesis record includes a value, ApproximateArrivalTimestamp , that +// is set when a stream successfully receives and stores a record. This is commonly +// referred to as a server-side time stamp, whereas a client-side time stamp is set +// when a data producer creates or sends the record to a stream (a data producer is +// any data source putting data records into a stream, for example with PutRecords). The // time stamp has millisecond precision. There are no guarantees about the time // stamp accuracy, or that the time stamp is always increasing. For example, // records in a shard or across a stream might have time stamps that are out of -// order. This operation has a limit of five transactions per second per shard. +// order. +// +// This operation has a limit of five transactions per second per shard. +// +// [Amazon Kinesis Data Streams Limits]: https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html +// [Monitoring]: https://docs.aws.amazon.com/kinesis/latest/dev/monitoring.html func (c *Client) GetRecords(ctx context.Context, params *GetRecordsInput, optFns ...func(*Options)) (*GetRecordsOutput, error) { if params == nil { params = &GetRecordsInput{} @@ -80,7 +92,7 @@ func (c *Client) GetRecords(ctx context.Context, params *GetRecordsInput, optFns return out, nil } -// Represents the input for GetRecords . +// Represents the input for GetRecords. type GetRecordsInput struct { // The position in the shard from which you want to start sequentially reading @@ -91,8 +103,8 @@ type GetRecordsInput struct { ShardIterator *string // The maximum number of records to return. Specify a value of up to 10,000. If - // you specify a value that is greater than 10,000, GetRecords throws - // InvalidArgumentException . The default value is 10,000. + // you specify a value that is greater than 10,000, GetRecordsthrows InvalidArgumentException + // . The default value is 10,000. Limit *int32 // The ARN of the stream. @@ -102,11 +114,12 @@ type GetRecordsInput struct { } func (in *GetRecordsInput) bindEndpointParams(p *EndpointParameters) { + p.StreamARN = in.StreamARN p.OperationType = ptr.String("data") } -// Represents the output for GetRecords . +// Represents the output for GetRecords. type GetRecordsOutput struct { // The data records retrieved from the shard. @@ -118,8 +131,8 @@ type GetRecordsOutput struct { // response only when the end of the current shard is reached. ChildShards []types.ChildShard - // The number of milliseconds the GetRecords response is from the tip of the - // stream, indicating how far behind current time the consumer is. 
A value of zero + // The number of milliseconds the GetRecords response is from the tip of the stream, + // indicating how far behind current time the consumer is. A value of zero // indicates that record processing is caught up, and there are no new records to // process at this moment. MillisBehindLatest *int64 @@ -190,6 +203,12 @@ func (c *Client) addOperationGetRecordsMiddlewares(stack *middleware.Stack, opti if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpGetRecordsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_GetResourcePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_GetResourcePolicy.go index 88192a4fa6b..06a9f3b4676 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_GetResourcePolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_GetResourcePolicy.go @@ -13,7 +13,9 @@ import ( // Returns a policy attached to the specified data stream or consumer. Request // patterns can be one of the following: +// // - Data stream pattern: arn:aws.*:kinesis:.*:\d{12}:.*stream/\S+ +// // - Consumer pattern: // ^(arn):aws.*:kinesis:.*:\d{12}:.*stream\/[a-zA-Z0-9_.-]+\/consumer\/[a-zA-Z0-9_.-]+:[0-9]+ func (c *Client) GetResourcePolicy(ctx context.Context, params *GetResourcePolicyInput, optFns ...func(*Options)) (*GetResourcePolicyOutput, error) { @@ -42,6 +44,7 @@ type GetResourcePolicyInput struct { } func (in *GetResourcePolicyInput) bindEndpointParams(p *EndpointParameters) { + p.ResourceARN = in.ResourceARN p.OperationType = ptr.String("control") } @@ -114,6 +117,12 @@ func (c *Client) addOperationGetResourcePolicyMiddlewares(stack *middleware.Stac if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpGetResourcePolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_GetShardIterator.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_GetShardIterator.go index d69445cd971..db5051950d1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_GetShardIterator.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_GetShardIterator.go @@ -14,37 +14,45 @@ import ( ) // Gets an Amazon Kinesis shard iterator. A shard iterator expires 5 minutes after -// it is returned to the requester. When invoking this API, you must use either the -// StreamARN or the StreamName parameter, or both. It is recommended that you use -// the StreamARN input parameter when you invoke this API. A shard iterator -// specifies the shard position from which to start reading data records -// sequentially. The position is specified using the sequence number of a data -// record in a shard. A sequence number is the identifier associated with every -// record ingested in the stream, and is assigned when a record is put into the -// stream. Each stream has one or more shards. You must specify the shard iterator -// type. 
For example, you can set the ShardIteratorType parameter to read exactly -// from the position denoted by a specific sequence number by using the -// AT_SEQUENCE_NUMBER shard iterator type. Alternatively, the parameter can read -// right after the sequence number by using the AFTER_SEQUENCE_NUMBER shard -// iterator type, using sequence numbers returned by earlier calls to PutRecord , -// PutRecords , GetRecords , or DescribeStream . In the request, you can specify -// the shard iterator type AT_TIMESTAMP to read records from an arbitrary point in -// time, TRIM_HORIZON to cause ShardIterator to point to the last untrimmed record -// in the shard in the system (the oldest data record in the shard), or LATEST so -// that you always read the most recent data in the shard. When you read repeatedly -// from a stream, use a GetShardIterator request to get the first shard iterator -// for use in your first GetRecords request and for subsequent reads use the shard -// iterator returned by the GetRecords request in NextShardIterator . A new shard -// iterator is returned by every GetRecords request in NextShardIterator , which -// you use in the ShardIterator parameter of the next GetRecords request. If a -// GetShardIterator request is made too often, you receive a +// it is returned to the requester. +// +// When invoking this API, you must use either the StreamARN or the StreamName +// parameter, or both. It is recommended that you use the StreamARN input +// parameter when you invoke this API. +// +// A shard iterator specifies the shard position from which to start reading data +// records sequentially. The position is specified using the sequence number of a +// data record in a shard. A sequence number is the identifier associated with +// every record ingested in the stream, and is assigned when a record is put into +// the stream. Each stream has one or more shards. +// +// You must specify the shard iterator type. For example, you can set the +// ShardIteratorType parameter to read exactly from the position denoted by a +// specific sequence number by using the AT_SEQUENCE_NUMBER shard iterator type. +// Alternatively, the parameter can read right after the sequence number by using +// the AFTER_SEQUENCE_NUMBER shard iterator type, using sequence numbers returned +// by earlier calls to PutRecord, PutRecords, GetRecords, or DescribeStream. In the request, you can specify the shard +// iterator type AT_TIMESTAMP to read records from an arbitrary point in time, +// TRIM_HORIZON to cause ShardIterator to point to the last untrimmed record in +// the shard in the system (the oldest data record in the shard), or LATEST so +// that you always read the most recent data in the shard. +// +// When you read repeatedly from a stream, use a GetShardIterator request to get the first shard +// iterator for use in your first GetRecordsrequest and for subsequent reads use the shard +// iterator returned by the GetRecordsrequest in NextShardIterator . A new shard iterator is +// returned by every GetRecordsrequest in NextShardIterator , which you use in the +// ShardIterator parameter of the next GetRecords request. +// +// If a GetShardIterator request is made too often, you receive a // ProvisionedThroughputExceededException . For more information about throughput -// limits, see GetRecords , and Streams Limits (https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html) -// in the Amazon Kinesis Data Streams Developer Guide. 
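To make the shard-iterator and GetRecords flow described above concrete, here is a minimal consumer sketch against the v2 Kinesis client: it requests a TRIM_HORIZON iterator, then loops on GetRecords using NextShardIterator, pausing roughly one second between calls as the documentation recommends. The stream name and shard ID are placeholder assumptions.

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kinesis"
	"github.com/aws/aws-sdk-go-v2/service/kinesis/types"
)

func main() {
	ctx := context.Background()

	// Credentials and region come from the default chain (env vars, shared config, etc.).
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := kinesis.NewFromConfig(cfg)

	// Get the first iterator for the shard, starting at the oldest untrimmed record.
	// "example-stream" and "shardId-000000000000" are placeholders.
	itOut, err := client.GetShardIterator(ctx, &kinesis.GetShardIteratorInput{
		StreamName:        aws.String("example-stream"),
		ShardId:           aws.String("shardId-000000000000"),
		ShardIteratorType: types.ShardIteratorTypeTrimHorizon,
	})
	if err != nil {
		log.Fatal(err)
	}

	iterator := itOut.ShardIterator
	for iterator != nil {
		out, err := client.GetRecords(ctx, &kinesis.GetRecordsInput{
			ShardIterator: iterator,
			Limit:         aws.Int32(1000),
		})
		if err != nil {
			log.Fatal(err)
		}

		for _, r := range out.Records {
			log.Printf("sequence=%s partitionKey=%s bytes=%d",
				aws.ToString(r.SequenceNumber), aws.ToString(r.PartitionKey), len(r.Data))
		}

		// MillisBehindLatest reports how far the consumer lags behind the tip of the stream.
		if aws.ToInt64(out.MillisBehindLatest) == 0 {
			log.Println("caught up with the tip of the stream")
		}

		// Use the iterator returned by GetRecords for the next call, and respect the
		// recommended one-second pause between GetRecords calls on a shard.
		iterator = out.NextShardIterator
		time.Sleep(time.Second)
	}
}
```

Swapping the iterator type to LATEST, AT_SEQUENCE_NUMBER, or AT_TIMESTAMP changes only the starting position; the read loop itself is unchanged.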
If the shard is closed, -// GetShardIterator returns a valid iterator for the last sequence number of the -// shard. A shard can be closed as a result of using SplitShard or MergeShards . -// GetShardIterator has a limit of five transactions per second per account per -// open shard. +// limits, see GetRecords, and [Streams Limits] in the Amazon Kinesis Data Streams Developer Guide. +// +// If the shard is closed, GetShardIterator returns a valid iterator for the last sequence number +// of the shard. A shard can be closed as a result of using SplitShardor MergeShards. +// +// GetShardIteratorhas a limit of five transactions per second per account per open shard. +// +// [Streams Limits]: https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html func (c *Client) GetShardIterator(ctx context.Context, params *GetShardIteratorInput, optFns ...func(*Options)) (*GetShardIteratorOutput, error) { if params == nil { params = &GetShardIteratorInput{} @@ -69,15 +77,22 @@ type GetShardIteratorInput struct { ShardId *string // Determines how the shard iterator is used to start reading data records from - // the shard. The following are the valid Amazon Kinesis shard iterator types: + // the shard. + // + // The following are the valid Amazon Kinesis shard iterator types: + // // - AT_SEQUENCE_NUMBER - Start reading from the position denoted by a specific // sequence number, provided in the value StartingSequenceNumber . + // // - AFTER_SEQUENCE_NUMBER - Start reading right after the position denoted by a // specific sequence number, provided in the value StartingSequenceNumber . + // // - AT_TIMESTAMP - Start reading from the position denoted by a specific time // stamp, provided in the value Timestamp . + // // - TRIM_HORIZON - Start reading at the last untrimmed record in the shard in // the system, which is the oldest data record in the shard. + // // - LATEST - Start reading just after the most recent record in the shard, so // that you always read the most recent data in the shard. // @@ -108,6 +123,7 @@ type GetShardIteratorInput struct { } func (in *GetShardIteratorInput) bindEndpointParams(p *EndpointParameters) { + p.StreamARN = in.StreamARN p.OperationType = ptr.String("data") } @@ -181,6 +197,12 @@ func (c *Client) addOperationGetShardIteratorMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpGetShardIteratorValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_IncreaseStreamRetentionPeriod.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_IncreaseStreamRetentionPeriod.go index 40e8bc66894..9882f320265 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_IncreaseStreamRetentionPeriod.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_IncreaseStreamRetentionPeriod.go @@ -13,16 +13,19 @@ import ( // Increases the Kinesis data stream's retention period, which is the length of // time data records are accessible after they are added to the stream. The maximum -// value of a stream's retention period is 8760 hours (365 days). When invoking -// this API, you must use either the StreamARN or the StreamName parameter, or -// both. It is recommended that you use the StreamARN input parameter when you -// invoke this API. 
If you choose a longer stream retention period, this operation -// increases the time period during which records that have not yet expired are -// accessible. However, it does not make previous, expired data (older than the -// stream's previous retention period) accessible after the operation has been -// called. For example, if a stream's retention period is set to 24 hours and is -// increased to 168 hours, any data that is older than 24 hours remains -// inaccessible to consumer applications. +// value of a stream's retention period is 8760 hours (365 days). +// +// When invoking this API, you must use either the StreamARN or the StreamName +// parameter, or both. It is recommended that you use the StreamARN input +// parameter when you invoke this API. +// +// If you choose a longer stream retention period, this operation increases the +// time period during which records that have not yet expired are accessible. +// However, it does not make previous, expired data (older than the stream's +// previous retention period) accessible after the operation has been called. For +// example, if a stream's retention period is set to 24 hours and is increased to +// 168 hours, any data that is older than 24 hours remains inaccessible to consumer +// applications. func (c *Client) IncreaseStreamRetentionPeriod(ctx context.Context, params *IncreaseStreamRetentionPeriodInput, optFns ...func(*Options)) (*IncreaseStreamRetentionPeriodOutput, error) { if params == nil { params = &IncreaseStreamRetentionPeriodInput{} @@ -38,7 +41,7 @@ func (c *Client) IncreaseStreamRetentionPeriod(ctx context.Context, params *Incr return out, nil } -// Represents the input for IncreaseStreamRetentionPeriod . +// Represents the input for IncreaseStreamRetentionPeriod. type IncreaseStreamRetentionPeriodInput struct { // The new retention period of the stream, in hours. Must be more than the current @@ -57,6 +60,7 @@ type IncreaseStreamRetentionPeriodInput struct { } func (in *IncreaseStreamRetentionPeriodInput) bindEndpointParams(p *EndpointParameters) { + p.StreamARN = in.StreamARN p.OperationType = ptr.String("control") } @@ -123,6 +127,12 @@ func (c *Client) addOperationIncreaseStreamRetentionPeriodMiddlewares(stack *mid if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpIncreaseStreamRetentionPeriodValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_ListShards.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_ListShards.go index fe0425d7a00..3a995c84a82 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_ListShards.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_ListShards.go @@ -14,18 +14,22 @@ import ( ) // Lists the shards in a stream and provides information about each shard. This -// operation has a limit of 1000 transactions per second per data stream. When -// invoking this API, you must use either the StreamARN or the StreamName +// operation has a limit of 1000 transactions per second per data stream. +// +// When invoking this API, you must use either the StreamARN or the StreamName // parameter, or both. It is recommended that you use the StreamARN input -// parameter when you invoke this API. This action does not list expired shards. 
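A minimal sketch of the retention-period change discussed above, assuming a placeholder stream name; it raises retention from the 24-hour default to 168 hours (7 days), which does not make already-expired records accessible again:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kinesis"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := kinesis.NewFromConfig(cfg)

	// Raise the retention period to 7 days. The new value must be greater than the
	// stream's current retention period.
	_, err = client.IncreaseStreamRetentionPeriod(ctx, &kinesis.IncreaseStreamRetentionPeriodInput{
		StreamName:           aws.String("example-stream"), // placeholder
		RetentionPeriodHours: aws.Int32(168),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("retention period increased to 168 hours")
}
```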
-// For information about expired shards, see Data Routing, Data Persistence, and -// Shard State after a Reshard (https://docs.aws.amazon.com/streams/latest/dev/kinesis-using-sdk-java-after-resharding.html#kinesis-using-sdk-java-resharding-data-routing) -// . This API is a new operation that is used by the Amazon Kinesis Client Library +// parameter when you invoke this API. +// +// This action does not list expired shards. For information about expired shards, +// see [Data Routing, Data Persistence, and Shard State after a Reshard]. +// +// This API is a new operation that is used by the Amazon Kinesis Client Library // (KCL). If you have a fine-grained IAM policy that only allows specific // operations, you must update your policy to allow calls to this API. For more -// information, see Controlling Access to Amazon Kinesis Data Streams Resources -// Using IAM (https://docs.aws.amazon.com/streams/latest/dev/controlling-access.html) -// . +// information, see [Controlling Access to Amazon Kinesis Data Streams Resources Using IAM]. +// +// [Data Routing, Data Persistence, and Shard State after a Reshard]: https://docs.aws.amazon.com/streams/latest/dev/kinesis-using-sdk-java-after-resharding.html#kinesis-using-sdk-java-resharding-data-routing +// [Controlling Access to Amazon Kinesis Data Streams Resources Using IAM]: https://docs.aws.amazon.com/streams/latest/dev/controlling-access.html func (c *Client) ListShards(ctx context.Context, params *ListShardsInput, optFns ...func(*Options)) (*ListShardsOutput, error) { if params == nil { params = &ListShardsInput{} @@ -44,15 +48,18 @@ func (c *Client) ListShards(ctx context.Context, params *ListShardsInput, optFns type ListShardsInput struct { // Specify this parameter to indicate that you want to list the shards starting - // with the shard whose ID immediately follows ExclusiveStartShardId . If you don't - // specify this parameter, the default behavior is for ListShards to list the - // shards starting with the first one in the stream. You cannot specify this - // parameter if you specify NextToken . + // with the shard whose ID immediately follows ExclusiveStartShardId . + // + // If you don't specify this parameter, the default behavior is for ListShards to + // list the shards starting with the first one in the stream. + // + // You cannot specify this parameter if you specify NextToken . ExclusiveStartShardId *string // The maximum number of shards to return in a single call to ListShards . The // maximum number of shards to return in a single call. The default value is 1000. // If you specify a value greater than 1000, at most 1000 results are returned. + // // When the number of shards to be listed is greater than the value of MaxResults , // the response contains a NextToken value that you can use in a subsequent call // to ListShards to list the next set of shards. @@ -63,33 +70,42 @@ type ListShardsInput struct { // MaxResults that is less than the number of shards in the data stream, the // response includes a pagination token named NextToken . You can specify this // NextToken value in a subsequent call to ListShards to list the next set of - // shards. Don't specify StreamName or StreamCreationTimestamp if you specify - // NextToken because the latter unambiguously identifies the stream. You can - // optionally specify a value for the MaxResults parameter when you specify - // NextToken . 
If you specify a MaxResults value that is less than the number of - // shards that the operation returns if you don't specify MaxResults , the response - // will contain a new NextToken value. You can use the new NextToken value in a - // subsequent call to the ListShards operation. Tokens expire after 300 seconds. - // When you obtain a value for NextToken in the response to a call to ListShards , - // you have 300 seconds to use that value. If you specify an expired token in a - // call to ListShards , you get ExpiredNextTokenException . + // shards. + // + // Don't specify StreamName or StreamCreationTimestamp if you specify NextToken + // because the latter unambiguously identifies the stream. + // + // You can optionally specify a value for the MaxResults parameter when you + // specify NextToken . If you specify a MaxResults value that is less than the + // number of shards that the operation returns if you don't specify MaxResults , + // the response will contain a new NextToken value. You can use the new NextToken + // value in a subsequent call to the ListShards operation. + // + // Tokens expire after 300 seconds. When you obtain a value for NextToken in the + // response to a call to ListShards , you have 300 seconds to use that value. If + // you specify an expired token in a call to ListShards , you get + // ExpiredNextTokenException . NextToken *string // Enables you to filter out the response of the ListShards API. You can only - // specify one filter at a time. If you use the ShardFilter parameter when - // invoking the ListShards API, the Type is the required property and must be - // specified. If you specify the AT_TRIM_HORIZON , FROM_TRIM_HORIZON , or AT_LATEST - // types, you do not need to specify either the ShardId or the Timestamp optional - // properties. If you specify the AFTER_SHARD_ID type, you must also provide the - // value for the optional ShardId property. The ShardId property is identical in - // fuctionality to the ExclusiveStartShardId parameter of the ListShards API. When - // ShardId property is specified, the response includes the shards starting with - // the shard whose ID immediately follows the ShardId that you provided. If you - // specify the AT_TIMESTAMP or FROM_TIMESTAMP_ID type, you must also provide the - // value for the optional Timestamp property. If you specify the AT_TIMESTAMP - // type, then all shards that were open at the provided timestamp are returned. If - // you specify the FROM_TIMESTAMP type, then all shards starting from the provided - // timestamp to TIP are returned. + // specify one filter at a time. + // + // If you use the ShardFilter parameter when invoking the ListShards API, the Type + // is the required property and must be specified. If you specify the + // AT_TRIM_HORIZON , FROM_TRIM_HORIZON , or AT_LATEST types, you do not need to + // specify either the ShardId or the Timestamp optional properties. + // + // If you specify the AFTER_SHARD_ID type, you must also provide the value for the + // optional ShardId property. The ShardId property is identical in fuctionality to + // the ExclusiveStartShardId parameter of the ListShards API. When ShardId + // property is specified, the response includes the shards starting with the shard + // whose ID immediately follows the ShardId that you provided. + // + // If you specify the AT_TIMESTAMP or FROM_TIMESTAMP_ID type, you must also + // provide the value for the optional Timestamp property. 
If you specify the + // AT_TIMESTAMP type, then all shards that were open at the provided timestamp are + // returned. If you specify the FROM_TIMESTAMP type, then all shards starting from + // the provided timestamp to TIP are returned. ShardFilter *types.ShardFilter // The ARN of the stream. @@ -98,18 +114,21 @@ type ListShardsInput struct { // Specify this input parameter to distinguish data streams that have the same // name. For example, if you create a data stream and then delete it, and you later // create another data stream with the same name, you can use this input parameter - // to specify which of the two streams you want to list the shards for. You cannot - // specify this parameter if you specify the NextToken parameter. + // to specify which of the two streams you want to list the shards for. + // + // You cannot specify this parameter if you specify the NextToken parameter. StreamCreationTimestamp *time.Time - // The name of the data stream whose shards you want to list. You cannot specify - // this parameter if you specify the NextToken parameter. + // The name of the data stream whose shards you want to list. + // + // You cannot specify this parameter if you specify the NextToken parameter. StreamName *string noSmithyDocumentSerde } func (in *ListShardsInput) bindEndpointParams(p *EndpointParameters) { + p.StreamARN = in.StreamARN p.OperationType = ptr.String("control") } @@ -122,10 +141,12 @@ type ListShardsOutput struct { // response includes a pagination token named NextToken . You can specify this // NextToken value in a subsequent call to ListShards to list the next set of // shards. For more information about the use of this pagination token when calling - // the ListShards operation, see ListShardsInput$NextToken . Tokens expire after - // 300 seconds. When you obtain a value for NextToken in the response to a call to - // ListShards , you have 300 seconds to use that value. If you specify an expired - // token in a call to ListShards , you get ExpiredNextTokenException . + // the ListShards operation, see ListShardsInput$NextToken. + // + // Tokens expire after 300 seconds. When you obtain a value for NextToken in the + // response to a call to ListShards , you have 300 seconds to use that value. If + // you specify an expired token in a call to ListShards , you get + // ExpiredNextTokenException . NextToken *string // An array of JSON objects. Each object represents one shard and specifies the @@ -195,6 +216,12 @@ func (c *Client) addOperationListShardsMiddlewares(stack *middleware.Stack, opti if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpListShardsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_ListStreamConsumers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_ListStreamConsumers.go index 0cf8e26b992..5f2eb9dfd98 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_ListStreamConsumers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_ListStreamConsumers.go @@ -14,8 +14,9 @@ import ( ) // Lists the consumers registered to receive data from a stream using enhanced -// fan-out, and provides information about each consumer. This operation has a -// limit of 5 transactions per second per stream. 
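The ListShards paging and filtering rules above can be exercised with a sketch like the following; the stream name is a placeholder, and the AT_LATEST filter constant name follows the SDK's usual enum naming. Note that once a NextToken is used, the stream name is dropped because the token alone identifies the stream.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kinesis"
	"github.com/aws/aws-sdk-go-v2/service/kinesis/types"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := kinesis.NewFromConfig(cfg)

	// First page: identify the stream by name and only return shards that are open right now.
	input := &kinesis.ListShardsInput{
		StreamName:  aws.String("example-stream"), // placeholder
		MaxResults:  aws.Int32(100),
		ShardFilter: &types.ShardFilter{Type: types.ShardFilterTypeAtLatest},
	}

	for {
		out, err := client.ListShards(ctx, input)
		if err != nil {
			log.Fatal(err)
		}
		for _, s := range out.Shards {
			log.Printf("shard %s hash-key range [%s, %s]",
				aws.ToString(s.ShardId),
				aws.ToString(s.HashKeyRange.StartingHashKey),
				aws.ToString(s.HashKeyRange.EndingHashKey))
		}
		if out.NextToken == nil {
			break
		}
		// Subsequent pages: pass only NextToken (it expires after 300 seconds).
		input = &kinesis.ListShardsInput{NextToken: out.NextToken, MaxResults: aws.Int32(100)}
	}
}
```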
+// fan-out, and provides information about each consumer. +// +// This operation has a limit of 5 transactions per second per stream. func (c *Client) ListStreamConsumers(ctx context.Context, params *ListStreamConsumersInput, optFns ...func(*Options)) (*ListStreamConsumersOutput, error) { if params == nil { params = &ListStreamConsumersInput{} @@ -34,9 +35,9 @@ func (c *Client) ListStreamConsumers(ctx context.Context, params *ListStreamCons type ListStreamConsumersInput struct { // The ARN of the Kinesis data stream for which you want to list the registered - // consumers. For more information, see Amazon Resource Names (ARNs) and Amazon - // Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kinesis-streams) - // . + // consumers. For more information, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]. + // + // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kinesis-streams // // This member is required. StreamARN *string @@ -52,30 +53,37 @@ type ListStreamConsumersInput struct { // consumers that are registered with the data stream, the response includes a // pagination token named NextToken . You can specify this NextToken value in a // subsequent call to ListStreamConsumers to list the next set of registered - // consumers. Don't specify StreamName or StreamCreationTimestamp if you specify - // NextToken because the latter unambiguously identifies the stream. You can - // optionally specify a value for the MaxResults parameter when you specify - // NextToken . If you specify a MaxResults value that is less than the number of - // consumers that the operation returns if you don't specify MaxResults , the - // response will contain a new NextToken value. You can use the new NextToken + // consumers. + // + // Don't specify StreamName or StreamCreationTimestamp if you specify NextToken + // because the latter unambiguously identifies the stream. + // + // You can optionally specify a value for the MaxResults parameter when you + // specify NextToken . If you specify a MaxResults value that is less than the + // number of consumers that the operation returns if you don't specify MaxResults , + // the response will contain a new NextToken value. You can use the new NextToken // value in a subsequent call to the ListStreamConsumers operation to list the - // next set of consumers. Tokens expire after 300 seconds. When you obtain a value - // for NextToken in the response to a call to ListStreamConsumers , you have 300 - // seconds to use that value. If you specify an expired token in a call to - // ListStreamConsumers , you get ExpiredNextTokenException . + // next set of consumers. + // + // Tokens expire after 300 seconds. When you obtain a value for NextToken in the + // response to a call to ListStreamConsumers , you have 300 seconds to use that + // value. If you specify an expired token in a call to ListStreamConsumers , you + // get ExpiredNextTokenException . NextToken *string // Specify this input parameter to distinguish data streams that have the same // name. For example, if you create a data stream and then delete it, and you later // create another data stream with the same name, you can use this input parameter - // to specify which of the two streams you want to list the consumers for. You - // can't specify this parameter if you specify the NextToken parameter. 
+ // to specify which of the two streams you want to list the consumers for. + // + // You can't specify this parameter if you specify the NextToken parameter. StreamCreationTimestamp *time.Time noSmithyDocumentSerde } func (in *ListStreamConsumersInput) bindEndpointParams(p *EndpointParameters) { + p.StreamARN = in.StreamARN p.OperationType = ptr.String("control") } @@ -91,11 +99,12 @@ type ListStreamConsumersOutput struct { // registered consumers, the response includes a pagination token named NextToken . // You can specify this NextToken value in a subsequent call to ListStreamConsumers // to list the next set of registered consumers. For more information about the use - // of this pagination token when calling the ListStreamConsumers operation, see - // ListStreamConsumersInput$NextToken . Tokens expire after 300 seconds. When you - // obtain a value for NextToken in the response to a call to ListStreamConsumers , - // you have 300 seconds to use that value. If you specify an expired token in a - // call to ListStreamConsumers , you get ExpiredNextTokenException . + // of this pagination token when calling the ListStreamConsumers operation, see ListStreamConsumersInput$NextToken. + // + // Tokens expire after 300 seconds. When you obtain a value for NextToken in the + // response to a call to ListStreamConsumers , you have 300 seconds to use that + // value. If you specify an expired token in a call to ListStreamConsumers , you + // get ExpiredNextTokenException . NextToken *string // Metadata pertaining to the operation's result. @@ -159,6 +168,12 @@ func (c *Client) addOperationListStreamConsumersMiddlewares(stack *middleware.St if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpListStreamConsumersValidationMiddleware(stack); err != nil { return err } @@ -183,14 +198,6 @@ func (c *Client) addOperationListStreamConsumersMiddlewares(stack *middleware.St return nil } -// ListStreamConsumersAPIClient is a client that implements the -// ListStreamConsumers operation. -type ListStreamConsumersAPIClient interface { - ListStreamConsumers(context.Context, *ListStreamConsumersInput, ...func(*Options)) (*ListStreamConsumersOutput, error) -} - -var _ ListStreamConsumersAPIClient = (*Client)(nil) - // ListStreamConsumersPaginatorOptions is the paginator options for // ListStreamConsumers type ListStreamConsumersPaginatorOptions struct { @@ -257,6 +264,9 @@ func (p *ListStreamConsumersPaginator) NextPage(ctx context.Context, optFns ...f } params.MaxResults = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListStreamConsumers(ctx, ¶ms, optFns...) if err != nil { return nil, err @@ -276,6 +286,14 @@ func (p *ListStreamConsumersPaginator) NextPage(ctx context.Context, optFns ...f return result, nil } +// ListStreamConsumersAPIClient is a client that implements the +// ListStreamConsumers operation. 
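A sketch of driving the ListStreamConsumers paginator, which hides the NextToken bookkeeping (and its 300-second expiry) behind HasMorePages/NextPage; the stream ARN is a placeholder, and the constructor name assumes the SDK's standard paginator naming:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kinesis"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := kinesis.NewFromConfig(cfg)

	// Page through the consumers registered for enhanced fan-out on this stream.
	p := kinesis.NewListStreamConsumersPaginator(client, &kinesis.ListStreamConsumersInput{
		StreamARN:  aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/example-stream"), // placeholder
		MaxResults: aws.Int32(10),
	})

	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			log.Fatal(err)
		}
		for _, c := range page.Consumers {
			log.Printf("consumer %s status %s", aws.ToString(c.ConsumerName), c.ConsumerStatus)
		}
	}
}
```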
+type ListStreamConsumersAPIClient interface { + ListStreamConsumers(context.Context, *ListStreamConsumersInput, ...func(*Options)) (*ListStreamConsumersOutput, error) +} + +var _ ListStreamConsumersAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListStreamConsumers(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_ListStreams.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_ListStreams.go index a596b1e478c..d49bd06aa92 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_ListStreams.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_ListStreams.go @@ -11,18 +11,22 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Lists your Kinesis data streams. The number of streams may be too large to -// return from a single call to ListStreams . You can limit the number of returned -// streams using the Limit parameter. If you do not specify a value for the Limit -// parameter, Kinesis Data Streams uses the default limit, which is currently 100. +// Lists your Kinesis data streams. +// +// The number of streams may be too large to return from a single call to +// ListStreams . You can limit the number of returned streams using the Limit +// parameter. If you do not specify a value for the Limit parameter, Kinesis Data +// Streams uses the default limit, which is currently 100. +// // You can detect if there are more streams available to list by using the // HasMoreStreams flag from the returned output. If there are more streams // available, you can request more streams by using the name of the last stream // returned by the ListStreams request in the ExclusiveStartStreamName parameter // in a subsequent request to ListStreams . The group of stream names returned by // the subsequent request is then added to the list. You can continue this process -// until all the stream names have been collected in the list. ListStreams has a -// limit of five transactions per second per account. +// until all the stream names have been collected in the list. +// +// ListStreamshas a limit of five transactions per second per account. func (c *Client) ListStreams(ctx context.Context, params *ListStreamsInput, optFns ...func(*Options)) (*ListStreamsOutput, error) { if params == nil { params = &ListStreamsInput{} @@ -135,6 +139,12 @@ func (c *Client) addOperationListStreamsMiddlewares(stack *middleware.Stack, opt if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListStreams(options.Region), middleware.Before); err != nil { return err } @@ -156,13 +166,6 @@ func (c *Client) addOperationListStreamsMiddlewares(stack *middleware.Stack, opt return nil } -// ListStreamsAPIClient is a client that implements the ListStreams operation. -type ListStreamsAPIClient interface { - ListStreams(context.Context, *ListStreamsInput, ...func(*Options)) (*ListStreamsOutput, error) -} - -var _ ListStreamsAPIClient = (*Client)(nil) - // ListStreamsPaginatorOptions is the paginator options for ListStreams type ListStreamsPaginatorOptions struct { // The maximum number of streams to list. The default value is 100. 
If you specify @@ -227,6 +230,9 @@ func (p *ListStreamsPaginator) NextPage(ctx context.Context, optFns ...func(*Opt } params.Limit = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListStreams(ctx, ¶ms, optFns...) if err != nil { return nil, err @@ -246,6 +252,13 @@ func (p *ListStreamsPaginator) NextPage(ctx context.Context, optFns ...func(*Opt return result, nil } +// ListStreamsAPIClient is a client that implements the ListStreams operation. +type ListStreamsAPIClient interface { + ListStreams(context.Context, *ListStreamsInput, ...func(*Options)) (*ListStreamsOutput, error) +} + +var _ ListStreamsAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListStreams(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_ListTagsForStream.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_ListTagsForStream.go index 7e05e760bd4..4e8e60c7b17 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_ListTagsForStream.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_ListTagsForStream.go @@ -13,9 +13,11 @@ import ( ) // Lists the tags for the specified Kinesis data stream. This operation has a -// limit of five transactions per second per account. When invoking this API, you -// must use either the StreamARN or the StreamName parameter, or both. It is -// recommended that you use the StreamARN input parameter when you invoke this API. +// limit of five transactions per second per account. +// +// When invoking this API, you must use either the StreamARN or the StreamName +// parameter, or both. It is recommended that you use the StreamARN input +// parameter when you invoke this API. func (c *Client) ListTagsForStream(ctx context.Context, params *ListTagsForStreamInput, optFns ...func(*Options)) (*ListTagsForStreamOutput, error) { if params == nil { params = &ListTagsForStreamInput{} @@ -53,6 +55,7 @@ type ListTagsForStreamInput struct { } func (in *ListTagsForStreamInput) bindEndpointParams(p *EndpointParameters) { + p.StreamARN = in.StreamARN p.OperationType = ptr.String("control") } @@ -133,6 +136,12 @@ func (c *Client) addOperationListTagsForStreamMiddlewares(stack *middleware.Stac if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListTagsForStream(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_MergeShards.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_MergeShards.go index fc763f10d6d..380ca612037 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_MergeShards.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_MergeShards.go @@ -20,28 +20,39 @@ import ( // range of 382...454, then you could merge these two shards into a single shard // that would have a hash key range of 276...454. After the merge, the single child // shard receives data for all hash key values covered by the two parent shards. +// // When invoking this API, you must use either the StreamARN or the StreamName // parameter, or both. 
It is recommended that you use the StreamARN input -// parameter when you invoke this API. MergeShards is called when there is a need -// to reduce the overall capacity of a stream because of excess capacity that is -// not being used. You must specify the shard to be merged and the adjacent shard -// for a stream. For more information about merging shards, see Merge Two Shards (https://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-merge.html) -// in the Amazon Kinesis Data Streams Developer Guide. If the stream is in the -// ACTIVE state, you can call MergeShards . If a stream is in the CREATING , -// UPDATING , or DELETING state, MergeShards returns a ResourceInUseException . If -// the specified stream does not exist, MergeShards returns a -// ResourceNotFoundException . You can use DescribeStreamSummary to check the -// state of the stream, which is returned in StreamStatus . MergeShards is an -// asynchronous operation. Upon receiving a MergeShards request, Amazon Kinesis -// Data Streams immediately returns a response and sets the StreamStatus to -// UPDATING . After the operation is completed, Kinesis Data Streams sets the -// StreamStatus to ACTIVE . Read and write operations continue to work while the -// stream is in the UPDATING state. You use DescribeStreamSummary and the -// ListShards APIs to determine the shard IDs that are specified in the MergeShards -// request. If you try to operate on too many streams in parallel using -// CreateStream , DeleteStream , MergeShards , or SplitShard , you receive a -// LimitExceededException . MergeShards has a limit of five transactions per -// second per account. +// parameter when you invoke this API. +// +// MergeShards is called when there is a need to reduce the overall capacity of a +// stream because of excess capacity that is not being used. You must specify the +// shard to be merged and the adjacent shard for a stream. For more information +// about merging shards, see [Merge Two Shards]in the Amazon Kinesis Data Streams Developer Guide. +// +// If the stream is in the ACTIVE state, you can call MergeShards . If a stream is +// in the CREATING , UPDATING , or DELETING state, MergeShards returns a +// ResourceInUseException . If the specified stream does not exist, MergeShards +// returns a ResourceNotFoundException . +// +// You can use DescribeStreamSummary to check the state of the stream, which is returned in StreamStatus +// . +// +// MergeShards is an asynchronous operation. Upon receiving a MergeShards request, +// Amazon Kinesis Data Streams immediately returns a response and sets the +// StreamStatus to UPDATING . After the operation is completed, Kinesis Data +// Streams sets the StreamStatus to ACTIVE . Read and write operations continue to +// work while the stream is in the UPDATING state. +// +// You use DescribeStreamSummary and the ListShards APIs to determine the shard IDs that are specified in the +// MergeShards request. +// +// If you try to operate on too many streams in parallel using CreateStream, DeleteStream, MergeShards , or SplitShard +// , you receive a LimitExceededException . +// +// MergeShards has a limit of five transactions per second per account. 
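A minimal sketch of the MergeShards flow described above, with placeholder stream and shard identifiers; it checks the stream status via DescribeStreamSummary before requesting the merge, since the call only succeeds while the stream is ACTIVE:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kinesis"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := kinesis.NewFromConfig(cfg)

	// The stream must be ACTIVE for MergeShards to succeed; inspect StreamStatus first.
	sum, err := client.DescribeStreamSummary(ctx, &kinesis.DescribeStreamSummaryInput{
		StreamName: aws.String("example-stream"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("stream status: %s", sum.StreamDescriptionSummary.StreamStatus)

	// Merge two adjacent shards (placeholder shard IDs). MergeShards is asynchronous:
	// the stream moves to UPDATING and returns to ACTIVE when the merge completes.
	_, err = client.MergeShards(ctx, &kinesis.MergeShardsInput{
		StreamName:           aws.String("example-stream"),
		ShardToMerge:         aws.String("shardId-000000000000"),
		AdjacentShardToMerge: aws.String("shardId-000000000001"),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```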
+// +// [Merge Two Shards]: https://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-merge.html func (c *Client) MergeShards(ctx context.Context, params *MergeShardsInput, optFns ...func(*Options)) (*MergeShardsOutput, error) { if params == nil { params = &MergeShardsInput{} @@ -80,6 +91,7 @@ type MergeShardsInput struct { } func (in *MergeShardsInput) bindEndpointParams(p *EndpointParameters) { + p.StreamARN = in.StreamARN p.OperationType = ptr.String("control") } @@ -146,6 +158,12 @@ func (c *Client) addOperationMergeShardsMiddlewares(stack *middleware.Stack, opt if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpMergeShardsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_PutRecord.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_PutRecord.go index 6c7bb42e5d2..eb2c63ec8bf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_PutRecord.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_PutRecord.go @@ -15,38 +15,51 @@ import ( // Writes a single data record into an Amazon Kinesis data stream. Call PutRecord // to send data into the stream for real-time ingestion and subsequent processing, // one record at a time. Each shard can support writes up to 1,000 records per -// second, up to a maximum data write total of 1 MiB per second. When invoking this -// API, you must use either the StreamARN or the StreamName parameter, or both. It -// is recommended that you use the StreamARN input parameter when you invoke this -// API. You must specify the name of the stream that captures, stores, and -// transports the data; a partition key; and the data blob itself. The data blob -// can be any type of data; for example, a segment from a log file, -// geographic/location data, website clickstream data, and so on. The partition key -// is used by Kinesis Data Streams to distribute data across shards. Kinesis Data -// Streams segregates the data records that belong to a stream into multiple -// shards, using the partition key associated with each data record to determine -// the shard to which a given data record belongs. Partition keys are Unicode -// strings, with a maximum length limit of 256 characters for each key. An MD5 hash -// function is used to map partition keys to 128-bit integer values and to map -// associated data records to shards using the hash key ranges of the shards. You -// can override hashing the partition key to determine the shard by explicitly -// specifying a hash value using the ExplicitHashKey parameter. For more -// information, see Adding Data to a Stream (https://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream) -// in the Amazon Kinesis Data Streams Developer Guide. PutRecord returns the shard -// ID of where the data record was placed and the sequence number that was assigned -// to the data record. Sequence numbers increase over time and are specific to a -// shard within a stream, not across all shards within a stream. To guarantee -// strictly increasing ordering, write serially to a shard and use the -// SequenceNumberForOrdering parameter. 
For more information, see Adding Data to a -// Stream (https://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream) -// in the Amazon Kinesis Data Streams Developer Guide. After you write a record to -// a stream, you cannot modify that record or its order within the stream. If a -// PutRecord request cannot be processed because of insufficient provisioned +// second, up to a maximum data write total of 1 MiB per second. +// +// When invoking this API, you must use either the StreamARN or the StreamName +// parameter, or both. It is recommended that you use the StreamARN input +// parameter when you invoke this API. +// +// You must specify the name of the stream that captures, stores, and transports +// the data; a partition key; and the data blob itself. +// +// The data blob can be any type of data; for example, a segment from a log file, +// geographic/location data, website clickstream data, and so on. +// +// The partition key is used by Kinesis Data Streams to distribute data across +// shards. Kinesis Data Streams segregates the data records that belong to a stream +// into multiple shards, using the partition key associated with each data record +// to determine the shard to which a given data record belongs. +// +// Partition keys are Unicode strings, with a maximum length limit of 256 +// characters for each key. An MD5 hash function is used to map partition keys to +// 128-bit integer values and to map associated data records to shards using the +// hash key ranges of the shards. You can override hashing the partition key to +// determine the shard by explicitly specifying a hash value using the +// ExplicitHashKey parameter. For more information, see [Adding Data to a Stream] in the Amazon Kinesis +// Data Streams Developer Guide. +// +// PutRecord returns the shard ID of where the data record was placed and the +// sequence number that was assigned to the data record. +// +// Sequence numbers increase over time and are specific to a shard within a +// stream, not across all shards within a stream. To guarantee strictly increasing +// ordering, write serially to a shard and use the SequenceNumberForOrdering +// parameter. For more information, see [Adding Data to a Stream]in the Amazon Kinesis Data Streams +// Developer Guide. +// +// After you write a record to a stream, you cannot modify that record or its +// order within the stream. +// +// If a PutRecord request cannot be processed because of insufficient provisioned // throughput on the shard involved in the request, PutRecord throws -// ProvisionedThroughputExceededException . By default, data records are accessible -// for 24 hours from the time that they are added to a stream. You can use -// IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to modify this -// retention period. +// ProvisionedThroughputExceededException . +// +// By default, data records are accessible for 24 hours from the time that they +// are added to a stream. You can use IncreaseStreamRetentionPeriodor DecreaseStreamRetentionPeriod to modify this retention period. 
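A short PutRecord sketch illustrating the ordering guidance above: records are written serially to one partition key, chaining SequenceNumberForOrdering from the previous response to get strictly increasing sequence numbers within the shard. The stream name and partition key are placeholders.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kinesis"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := kinesis.NewFromConfig(cfg)

	// Write two records serially to the same partition key, passing the sequence number
	// of the previous put as SequenceNumberForOrdering.
	var lastSeq *string
	for _, payload := range []string{"event-1", "event-2"} {
		out, err := client.PutRecord(ctx, &kinesis.PutRecordInput{
			StreamName:                aws.String("example-stream"), // placeholder
			PartitionKey:              aws.String("device-42"),      // placeholder
			Data:                      []byte(payload),
			SequenceNumberForOrdering: lastSeq,
		})
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("stored in shard %s with sequence number %s",
			aws.ToString(out.ShardId), aws.ToString(out.SequenceNumber))
		lastSeq = out.SequenceNumber
	}
}
```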
+// +// [Adding Data to a Stream]: https://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream func (c *Client) PutRecord(ctx context.Context, params *PutRecordInput, optFns ...func(*Options)) (*PutRecordOutput, error) { if params == nil { params = &PutRecordInput{} @@ -106,6 +119,7 @@ type PutRecordInput struct { } func (in *PutRecordInput) bindEndpointParams(p *EndpointParameters) { + p.StreamARN = in.StreamARN p.OperationType = ptr.String("data") } @@ -128,7 +142,9 @@ type PutRecordOutput struct { // The encryption type to use on the record. This parameter can be one of the // following values: + // // - NONE : Do not encrypt the records in the stream. + // // - KMS : Use server-side encryption on the records in the stream using a // customer-managed Amazon Web Services KMS key. EncryptionType types.EncryptionType @@ -194,6 +210,12 @@ func (c *Client) addOperationPutRecordMiddlewares(stack *middleware.Stack, optio if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpPutRecordValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_PutRecords.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_PutRecords.go index fdd5a3c0a84..73dc2f258c8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_PutRecords.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_PutRecords.go @@ -14,56 +14,74 @@ import ( // Writes multiple data records into a Kinesis data stream in a single call (also // referred to as a PutRecords request). Use this operation to send data into the -// stream for data ingestion and processing. When invoking this API, you must use -// either the StreamARN or the StreamName parameter, or both. It is recommended -// that you use the StreamARN input parameter when you invoke this API. Each -// PutRecords request can support up to 500 records. Each record in the request can -// be as large as 1 MiB, up to a limit of 5 MiB for the entire request, including -// partition keys. Each shard can support writes up to 1,000 records per second, up -// to a maximum data write total of 1 MiB per second. You must specify the name of -// the stream that captures, stores, and transports the data; and an array of -// request Records , with each record in the array requiring a partition key and -// data blob. The record size limit applies to the total size of the partition key -// and data blob. The data blob can be any type of data; for example, a segment -// from a log file, geographic/location data, website clickstream data, and so on. +// stream for data ingestion and processing. +// +// When invoking this API, you must use either the StreamARN or the StreamName +// parameter, or both. It is recommended that you use the StreamARN input +// parameter when you invoke this API. +// +// Each PutRecords request can support up to 500 records. Each record in the +// request can be as large as 1 MiB, up to a limit of 5 MiB for the entire request, +// including partition keys. Each shard can support writes up to 1,000 records per +// second, up to a maximum data write total of 1 MiB per second. 
+// +// You must specify the name of the stream that captures, stores, and transports +// the data; and an array of request Records , with each record in the array +// requiring a partition key and data blob. The record size limit applies to the +// total size of the partition key and data blob. +// +// The data blob can be any type of data; for example, a segment from a log file, +// geographic/location data, website clickstream data, and so on. +// // The partition key is used by Kinesis Data Streams as input to a hash function // that maps the partition key and associated data to a specific shard. An MD5 hash // function is used to map partition keys to 128-bit integer values and to map // associated data records to shards. As a result of this hashing mechanism, all // data records with the same partition key map to the same shard within the -// stream. For more information, see Adding Data to a Stream (https://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream) -// in the Amazon Kinesis Data Streams Developer Guide. Each record in the Records -// array may include an optional parameter, ExplicitHashKey , which overrides the -// partition key to shard mapping. This parameter allows a data producer to -// determine explicitly the shard where the record is stored. For more information, -// see Adding Multiple Records with PutRecords (https://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-putrecords) -// in the Amazon Kinesis Data Streams Developer Guide. The PutRecords response -// includes an array of response Records . Each record in the response array -// directly correlates with a record in the request array using natural ordering, -// from the top to the bottom of the request and response. The response Records -// array always includes the same number of records as the request array. The -// response Records array includes both successfully and unsuccessfully processed -// records. Kinesis Data Streams attempts to process all records in each PutRecords -// request. A single record failure does not stop the processing of subsequent -// records. As a result, PutRecords doesn't guarantee the ordering of records. If -// you need to read records in the same order they are written to the stream, use -// PutRecord instead of PutRecords , and write to the same shard. A successfully -// processed record includes ShardId and SequenceNumber values. The ShardId -// parameter identifies the shard in the stream where the record is stored. The -// SequenceNumber parameter is an identifier assigned to the put record, unique to -// all records in the stream. An unsuccessfully processed record includes ErrorCode -// and ErrorMessage values. ErrorCode reflects the type of error and can be one of -// the following values: ProvisionedThroughputExceededException or InternalFailure -// . ErrorMessage provides more detailed information about the +// stream. For more information, see [Adding Data to a Stream]in the Amazon Kinesis Data Streams Developer +// Guide. +// +// Each record in the Records array may include an optional parameter, +// ExplicitHashKey , which overrides the partition key to shard mapping. This +// parameter allows a data producer to determine explicitly the shard where the +// record is stored. For more information, see [Adding Multiple Records with PutRecords]in the Amazon Kinesis Data Streams +// Developer Guide. 
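A PutRecords sketch with a placeholder stream name: it sends a small batch and then inspects FailedRecordCount and the per-record ErrorCode values, relying on the response preserving request order so that failed entries can be matched up and retried.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kinesis"
	"github.com/aws/aws-sdk-go-v2/service/kinesis/types"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := kinesis.NewFromConfig(cfg)

	// Build a small batch; each entry needs a partition key and a data blob.
	entries := make([]types.PutRecordsRequestEntry, 0, 3)
	for i := 0; i < 3; i++ {
		entries = append(entries, types.PutRecordsRequestEntry{
			PartitionKey: aws.String(fmt.Sprintf("key-%d", i)),
			Data:         []byte(fmt.Sprintf("payload-%d", i)),
		})
	}

	out, err := client.PutRecords(ctx, &kinesis.PutRecordsInput{
		StreamName: aws.String("example-stream"), // placeholder
		Records:    entries,
	})
	if err != nil {
		log.Fatal(err)
	}

	// A non-zero FailedRecordCount means some entries were throttled or failed; the
	// response array lines up with the request array, so index i refers to entries[i].
	if aws.ToInt32(out.FailedRecordCount) > 0 {
		for i, r := range out.Records {
			if r.ErrorCode != nil {
				log.Printf("record %d failed: %s (%s)",
					i, aws.ToString(r.ErrorCode), aws.ToString(r.ErrorMessage))
			}
		}
	}
}
```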
+// +// The PutRecords response includes an array of response Records . Each record in +// the response array directly correlates with a record in the request array using +// natural ordering, from the top to the bottom of the request and response. The +// response Records array always includes the same number of records as the +// request array. +// +// The response Records array includes both successfully and unsuccessfully +// processed records. Kinesis Data Streams attempts to process all records in each +// PutRecords request. A single record failure does not stop the processing of +// subsequent records. As a result, PutRecords doesn't guarantee the ordering of +// records. If you need to read records in the same order they are written to the +// stream, use PutRecordinstead of PutRecords , and write to the same shard. +// +// A successfully processed record includes ShardId and SequenceNumber values. The +// ShardId parameter identifies the shard in the stream where the record is stored. +// The SequenceNumber parameter is an identifier assigned to the put record, +// unique to all records in the stream. +// +// An unsuccessfully processed record includes ErrorCode and ErrorMessage values. +// ErrorCode reflects the type of error and can be one of the following values: +// ProvisionedThroughputExceededException or InternalFailure . ErrorMessage +// provides more detailed information about the // ProvisionedThroughputExceededException exception including the account ID, // stream name, and shard ID of the record that was throttled. For more information -// about partially successful responses, see Adding Multiple Records with -// PutRecords (https://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-add-data-to-stream.html#kinesis-using-sdk-java-putrecords) -// in the Amazon Kinesis Data Streams Developer Guide. After you write a record to -// a stream, you cannot modify that record or its order within the stream. By -// default, data records are accessible for 24 hours from the time that they are -// added to a stream. You can use IncreaseStreamRetentionPeriod or -// DecreaseStreamRetentionPeriod to modify this retention period. +// about partially successful responses, see [Adding Multiple Records with PutRecords]in the Amazon Kinesis Data Streams +// Developer Guide. +// +// After you write a record to a stream, you cannot modify that record or its +// order within the stream. +// +// By default, data records are accessible for 24 hours from the time that they +// are added to a stream. You can use IncreaseStreamRetentionPeriodor DecreaseStreamRetentionPeriod to modify this retention period. +// +// [Adding Multiple Records with PutRecords]: https://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-add-data-to-stream.html#kinesis-using-sdk-java-putrecords +// [Adding Data to a Stream]: https://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream func (c *Client) PutRecords(ctx context.Context, params *PutRecordsInput, optFns ...func(*Options)) (*PutRecordsOutput, error) { if params == nil { params = &PutRecordsInput{} @@ -97,6 +115,7 @@ type PutRecordsInput struct { } func (in *PutRecordsInput) bindEndpointParams(p *EndpointParameters) { + p.StreamARN = in.StreamARN p.OperationType = ptr.String("data") } @@ -114,7 +133,9 @@ type PutRecordsOutput struct { // The encryption type used on the records. This parameter can be one of the // following values: + // // - NONE : Do not encrypt the records. 
+ // // - KMS : Use server-side encryption on the records using a customer-managed // Amazon Web Services KMS key. EncryptionType types.EncryptionType @@ -183,6 +204,12 @@ func (c *Client) addOperationPutRecordsMiddlewares(stack *middleware.Stack, opti if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpPutRecordsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_PutResourcePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_PutResourcePolicy.go index 9e8420cabfe..3c50531704d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_PutResourcePolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_PutResourcePolicy.go @@ -18,15 +18,18 @@ import ( // belong to the owner's account in order to use this operation. If you don't have // PutResourcePolicy permissions, Amazon Kinesis Data Streams returns a 403 Access // Denied error . If you receive a ResourceNotFoundException , check to see if you -// passed a valid stream or consumer resource. Request patterns can be one of the -// following: +// passed a valid stream or consumer resource. +// +// Request patterns can be one of the following: +// // - Data stream pattern: arn:aws.*:kinesis:.*:\d{12}:.*stream/\S+ +// // - Consumer pattern: // ^(arn):aws.*:kinesis:.*:\d{12}:.*stream\/[a-zA-Z0-9_.-]+\/consumer\/[a-zA-Z0-9_.-]+:[0-9]+ // -// For more information, see Controlling Access to Amazon Kinesis Data Streams -// Resources Using IAM (https://docs.aws.amazon.com/streams/latest/dev/controlling-access.html) -// . +// For more information, see [Controlling Access to Amazon Kinesis Data Streams Resources Using IAM]. +// +// [Controlling Access to Amazon Kinesis Data Streams Resources Using IAM]: https://docs.aws.amazon.com/streams/latest/dev/controlling-access.html func (c *Client) PutResourcePolicy(ctx context.Context, params *PutResourcePolicyInput, optFns ...func(*Options)) (*PutResourcePolicyOutput, error) { if params == nil { params = &PutResourcePolicyInput{} @@ -59,6 +62,7 @@ type PutResourcePolicyInput struct { } func (in *PutResourcePolicyInput) bindEndpointParams(p *EndpointParameters) { + p.ResourceARN = in.ResourceARN p.OperationType = ptr.String("control") } @@ -125,6 +129,12 @@ func (c *Client) addOperationPutResourcePolicyMiddlewares(stack *middleware.Stac if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpPutResourcePolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_RegisterStreamConsumer.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_RegisterStreamConsumer.go index ac13c752a31..6ef47480aa2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_RegisterStreamConsumer.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_RegisterStreamConsumer.go @@ -13,17 +13,21 @@ import ( ) // Registers a consumer with a Kinesis data stream. 
When you use this operation, -// the consumer you register can then call SubscribeToShard to receive data from -// the stream using enhanced fan-out, at a rate of up to 2 MiB per second for every -// shard you subscribe to. This rate is unaffected by the total number of consumers -// that read from the same stream. You can register up to 20 consumers per stream. -// A given consumer can only be registered with one stream at a time. For an -// example of how to use this operations, see Enhanced Fan-Out Using the Kinesis -// Data Streams API . The use of this operation has a limit of five transactions -// per second per account. Also, only 5 consumers can be created simultaneously. In -// other words, you cannot have more than 5 consumers in a CREATING status at the -// same time. Registering a 6th consumer while there are 5 in a CREATING status -// results in a LimitExceededException . +// the consumer you register can then call SubscribeToShardto receive data from the stream using +// enhanced fan-out, at a rate of up to 2 MiB per second for every shard you +// subscribe to. This rate is unaffected by the total number of consumers that read +// from the same stream. +// +// You can register up to 20 consumers per stream. A given consumer can only be +// registered with one stream at a time. +// +// For an example of how to use this operations, see Enhanced Fan-Out Using the Kinesis Data Streams API. +// +// The use of this operation has a limit of five transactions per second per +// account. Also, only 5 consumers can be created simultaneously. In other words, +// you cannot have more than 5 consumers in a CREATING status at the same time. +// Registering a 6th consumer while there are 5 in a CREATING status results in a +// LimitExceededException . func (c *Client) RegisterStreamConsumer(ctx context.Context, params *RegisterStreamConsumerInput, optFns ...func(*Options)) (*RegisterStreamConsumerOutput, error) { if params == nil { params = &RegisterStreamConsumerInput{} @@ -48,9 +52,9 @@ type RegisterStreamConsumerInput struct { ConsumerName *string // The ARN of the Kinesis data stream that you want to register the consumer with. - // For more info, see Amazon Resource Names (ARNs) and Amazon Web Services Service - // Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kinesis-streams) - // . + // For more info, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]. + // + // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kinesis-streams // // This member is required. 
StreamARN *string @@ -59,6 +63,7 @@ type RegisterStreamConsumerInput struct { } func (in *RegisterStreamConsumerInput) bindEndpointParams(p *EndpointParameters) { + p.StreamARN = in.StreamARN p.OperationType = ptr.String("control") } @@ -132,6 +137,12 @@ func (c *Client) addOperationRegisterStreamConsumerMiddlewares(stack *middleware if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpRegisterStreamConsumerValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_RemoveTagsFromStream.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_RemoveTagsFromStream.go index 788a1d8fee2..16948a3923c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_RemoveTagsFromStream.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_RemoveTagsFromStream.go @@ -12,12 +12,15 @@ import ( ) // Removes tags from the specified Kinesis data stream. Removed tags are deleted -// and cannot be recovered after this operation successfully completes. When -// invoking this API, you must use either the StreamARN or the StreamName +// and cannot be recovered after this operation successfully completes. +// +// When invoking this API, you must use either the StreamARN or the StreamName // parameter, or both. It is recommended that you use the StreamARN input -// parameter when you invoke this API. If you specify a tag that does not exist, it -// is ignored. RemoveTagsFromStream has a limit of five transactions per second -// per account. +// parameter when you invoke this API. +// +// If you specify a tag that does not exist, it is ignored. +// +// RemoveTagsFromStreamhas a limit of five transactions per second per account. func (c *Client) RemoveTagsFromStream(ctx context.Context, params *RemoveTagsFromStreamInput, optFns ...func(*Options)) (*RemoveTagsFromStreamOutput, error) { if params == nil { params = &RemoveTagsFromStreamInput{} @@ -51,6 +54,7 @@ type RemoveTagsFromStreamInput struct { } func (in *RemoveTagsFromStreamInput) bindEndpointParams(p *EndpointParameters) { + p.StreamARN = in.StreamARN p.OperationType = ptr.String("control") } @@ -117,6 +121,12 @@ func (c *Client) addOperationRemoveTagsFromStreamMiddlewares(stack *middleware.S if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpRemoveTagsFromStreamValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_SplitShard.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_SplitShard.go index 7a45014cae7..47f5bd06625 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_SplitShard.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_SplitShard.go @@ -15,40 +15,52 @@ import ( // stream's capacity to ingest and transport data. SplitShard is called when there // is a need to increase the overall capacity of a stream because of an expected // increase in the volume of data records being ingested. This API is only -// supported for the data streams with the provisioned capacity mode. 
When invoking -// this API, you must use either the StreamARN or the StreamName parameter, or -// both. It is recommended that you use the StreamARN input parameter when you -// invoke this API. You can also use SplitShard when a shard appears to be -// approaching its maximum utilization; for example, the producers sending data -// into the specific shard are suddenly sending more than previously anticipated. -// You can also call SplitShard to increase stream capacity, so that more Kinesis -// Data Streams applications can simultaneously read data from the stream for -// real-time processing. You must specify the shard to be split and the new hash -// key, which is the position in the shard where the shard gets split in two. In -// many cases, the new hash key might be the average of the beginning and ending -// hash key, but it can be any hash key value in the range being mapped into the -// shard. For more information, see Split a Shard (https://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-split.html) -// in the Amazon Kinesis Data Streams Developer Guide. You can use -// DescribeStreamSummary and the ListShards APIs to determine the shard ID and -// hash key values for the ShardToSplit and NewStartingHashKey parameters that are -// specified in the SplitShard request. SplitShard is an asynchronous operation. -// Upon receiving a SplitShard request, Kinesis Data Streams immediately returns a -// response and sets the stream status to UPDATING . After the operation is -// completed, Kinesis Data Streams sets the stream status to ACTIVE . Read and -// write operations continue to work while the stream is in the UPDATING state. -// You can use DescribeStreamSummary to check the status of the stream, which is -// returned in StreamStatus . If the stream is in the ACTIVE state, you can call -// SplitShard . If the specified stream does not exist, DescribeStreamSummary -// returns a ResourceNotFoundException . If you try to create more shards than are -// authorized for your account, you receive a LimitExceededException . For the -// default shard limit for an Amazon Web Services account, see Kinesis Data -// Streams Limits (https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html) -// in the Amazon Kinesis Data Streams Developer Guide. To increase this limit, -// contact Amazon Web Services Support (https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html) -// . If you try to operate on too many streams simultaneously using CreateStream , -// DeleteStream , MergeShards , and/or SplitShard , you receive a -// LimitExceededException . SplitShard has a limit of five transactions per second -// per account. +// supported for the data streams with the provisioned capacity mode. +// +// When invoking this API, you must use either the StreamARN or the StreamName +// parameter, or both. It is recommended that you use the StreamARN input +// parameter when you invoke this API. +// +// You can also use SplitShard when a shard appears to be approaching its maximum +// utilization; for example, the producers sending data into the specific shard are +// suddenly sending more than previously anticipated. You can also call SplitShard +// to increase stream capacity, so that more Kinesis Data Streams applications can +// simultaneously read data from the stream for real-time processing. +// +// You must specify the shard to be split and the new hash key, which is the +// position in the shard where the shard gets split in two. 
In many cases, the new +// hash key might be the average of the beginning and ending hash key, but it can +// be any hash key value in the range being mapped into the shard. For more +// information, see [Split a Shard]in the Amazon Kinesis Data Streams Developer Guide. +// +// You can use DescribeStreamSummary and the ListShards APIs to determine the shard ID and hash key values for +// the ShardToSplit and NewStartingHashKey parameters that are specified in the +// SplitShard request. +// +// SplitShard is an asynchronous operation. Upon receiving a SplitShard request, +// Kinesis Data Streams immediately returns a response and sets the stream status +// to UPDATING . After the operation is completed, Kinesis Data Streams sets the +// stream status to ACTIVE . Read and write operations continue to work while the +// stream is in the UPDATING state. +// +// You can use DescribeStreamSummary to check the status of the stream, which is returned in +// StreamStatus . If the stream is in the ACTIVE state, you can call SplitShard . +// +// If the specified stream does not exist, DescribeStreamSummary returns a ResourceNotFoundException . +// If you try to create more shards than are authorized for your account, you +// receive a LimitExceededException . +// +// For the default shard limit for an Amazon Web Services account, see [Kinesis Data Streams Limits] in the +// Amazon Kinesis Data Streams Developer Guide. To increase this limit, [contact Amazon Web Services Support]. +// +// If you try to operate on too many streams simultaneously using CreateStream, DeleteStream, MergeShards, and/or SplitShard, +// you receive a LimitExceededException . +// +// SplitShard has a limit of five transactions per second per account. +// +// [Kinesis Data Streams Limits]: https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html +// [contact Amazon Web Services Support]: https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html +// [Split a Shard]: https://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-split.html func (c *Client) SplitShard(ctx context.Context, params *SplitShardInput, optFns ...func(*Options)) (*SplitShardOutput, error) { if params == nil { params = &SplitShardInput{} @@ -93,6 +105,7 @@ type SplitShardInput struct { } func (in *SplitShardInput) bindEndpointParams(p *EndpointParameters) { + p.StreamARN = in.StreamARN p.OperationType = ptr.String("control") } @@ -159,6 +172,12 @@ func (c *Client) addOperationSplitShardMiddlewares(stack *middleware.Stack, opti if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpSplitShardValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_StartStreamEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_StartStreamEncryption.go index 29fe78f30e5..4542d94346a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_StartStreamEncryption.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_StartStreamEncryption.go @@ -13,22 +13,27 @@ import ( ) // Enables or updates server-side encryption using an Amazon Web Services KMS key -// for a specified stream. When invoking this API, you must use either the -// StreamARN or the StreamName parameter, or both. 
It is recommended that you use -// the StreamARN input parameter when you invoke this API. Starting encryption is -// an asynchronous operation. Upon receiving the request, Kinesis Data Streams -// returns immediately and sets the status of the stream to UPDATING . After the -// update is complete, Kinesis Data Streams sets the status of the stream back to -// ACTIVE . Updating or applying encryption normally takes a few seconds to -// complete, but it can take minutes. You can continue to read and write data to -// your stream while its status is UPDATING . Once the status of the stream is -// ACTIVE , encryption begins for records written to the stream. API Limits: You -// can successfully apply a new Amazon Web Services KMS key for server-side -// encryption 25 times in a rolling 24-hour period. Note: It can take up to 5 -// seconds after the stream is in an ACTIVE status before all records written to -// the stream are encrypted. After you enable encryption, you can verify that -// encryption is applied by inspecting the API response from PutRecord or -// PutRecords . +// for a specified stream. +// +// When invoking this API, you must use either the StreamARN or the StreamName +// parameter, or both. It is recommended that you use the StreamARN input +// parameter when you invoke this API. +// +// Starting encryption is an asynchronous operation. Upon receiving the request, +// Kinesis Data Streams returns immediately and sets the status of the stream to +// UPDATING . After the update is complete, Kinesis Data Streams sets the status of +// the stream back to ACTIVE . Updating or applying encryption normally takes a few +// seconds to complete, but it can take minutes. You can continue to read and write +// data to your stream while its status is UPDATING . Once the status of the stream +// is ACTIVE , encryption begins for records written to the stream. +// +// API Limits: You can successfully apply a new Amazon Web Services KMS key for +// server-side encryption 25 times in a rolling 24-hour period. +// +// Note: It can take up to 5 seconds after the stream is in an ACTIVE status +// before all records written to the stream are encrypted. After you enable +// encryption, you can verify that encryption is applied by inspecting the API +// response from PutRecord or PutRecords . func (c *Client) StartStreamEncryption(ctx context.Context, params *StartStreamEncryptionInput, optFns ...func(*Options)) (*StartStreamEncryptionOutput, error) { if params == nil { params = &StartStreamEncryptionInput{} @@ -56,11 +61,16 @@ type StartStreamEncryptionInput struct { // Amazon Resource Name (ARN) to either an alias or a key, or an alias name // prefixed by "alias/".You can also use a master key owned by Kinesis Data Streams // by specifying the alias aws/kinesis . + // // - Key ARN example: // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // // - Alias ARN example: arn:aws:kms:us-east-1:123456789012:alias/MyAliasName + // // - Globally unique key ID example: 12345678-1234-1234-1234-123456789012 + // // - Alias name example: alias/MyAliasName + // // - Master key owned by Kinesis Data Streams: alias/aws/kinesis // // This member is required. @@ -76,6 +86,7 @@ type StartStreamEncryptionInput struct { } func (in *StartStreamEncryptionInput) bindEndpointParams(p *EndpointParameters) { + p.StreamARN = in.StreamARN p.OperationType = ptr.String("control") } @@ -142,6 +153,12 @@ func (c *Client) addOperationStartStreamEncryptionMiddlewares(stack *middleware. 
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpStartStreamEncryptionValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_StopStreamEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_StopStreamEncryption.go index 2bcc9e653f4..fd38cbab2bf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_StopStreamEncryption.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_StopStreamEncryption.go @@ -12,22 +12,28 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Disables server-side encryption for a specified stream. When invoking this API, -// you must use either the StreamARN or the StreamName parameter, or both. It is -// recommended that you use the StreamARN input parameter when you invoke this -// API. Stopping encryption is an asynchronous operation. Upon receiving the -// request, Kinesis Data Streams returns immediately and sets the status of the -// stream to UPDATING . After the update is complete, Kinesis Data Streams sets the -// status of the stream back to ACTIVE . Stopping encryption normally takes a few -// seconds to complete, but it can take minutes. You can continue to read and write -// data to your stream while its status is UPDATING . Once the status of the stream -// is ACTIVE , records written to the stream are no longer encrypted by Kinesis -// Data Streams. API Limits: You can successfully disable server-side encryption 25 -// times in a rolling 24-hour period. Note: It can take up to 5 seconds after the -// stream is in an ACTIVE status before all records written to the stream are no -// longer subject to encryption. After you disabled encryption, you can verify that -// encryption is not applied by inspecting the API response from PutRecord or -// PutRecords . +// Disables server-side encryption for a specified stream. +// +// When invoking this API, you must use either the StreamARN or the StreamName +// parameter, or both. It is recommended that you use the StreamARN input +// parameter when you invoke this API. +// +// Stopping encryption is an asynchronous operation. Upon receiving the request, +// Kinesis Data Streams returns immediately and sets the status of the stream to +// UPDATING . After the update is complete, Kinesis Data Streams sets the status of +// the stream back to ACTIVE . Stopping encryption normally takes a few seconds to +// complete, but it can take minutes. You can continue to read and write data to +// your stream while its status is UPDATING . Once the status of the stream is +// ACTIVE , records written to the stream are no longer encrypted by Kinesis Data +// Streams. +// +// API Limits: You can successfully disable server-side encryption 25 times in a +// rolling 24-hour period. +// +// Note: It can take up to 5 seconds after the stream is in an ACTIVE status +// before all records written to the stream are no longer subject to encryption. +// After you disabled encryption, you can verify that encryption is not applied by +// inspecting the API response from PutRecord or PutRecords . 
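The rewritten comments above describe starting and stopping stream encryption as asynchronous operations: the stream moves to UPDATING and then back to ACTIVE, after which the encryption type can be confirmed on PutRecord or PutRecords responses. As a rough sketch of that flow against this client, with a placeholder stream ARN and an illustrative polling interval that are not taken from the patch:

package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kinesis"
	"github.com/aws/aws-sdk-go-v2/service/kinesis/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := kinesis.NewFromConfig(cfg)

	// Placeholder ARN; substitute the stream you own.
	streamARN := aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/example-stream")

	// Starting encryption is asynchronous: the stream goes UPDATING, then ACTIVE.
	if _, err := client.StartStreamEncryption(ctx, &kinesis.StartStreamEncryptionInput{
		StreamARN:      streamARN,
		EncryptionType: types.EncryptionTypeKms,
		KeyId:          aws.String("alias/aws/kinesis"),
	}); err != nil {
		log.Fatal(err)
	}

	// Poll DescribeStreamSummary until StreamStatus returns to ACTIVE.
	for {
		out, err := client.DescribeStreamSummary(ctx, &kinesis.DescribeStreamSummaryInput{
			StreamARN: streamARN,
		})
		if err != nil {
			log.Fatal(err)
		}
		if out.StreamDescriptionSummary.StreamStatus == types.StreamStatusActive {
			break
		}
		time.Sleep(5 * time.Second)
	}
	log.Println("server-side encryption is active")
}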
func (c *Client) StopStreamEncryption(ctx context.Context, params *StopStreamEncryptionInput, optFns ...func(*Options)) (*StopStreamEncryptionOutput, error) { if params == nil { params = &StopStreamEncryptionInput{} @@ -55,11 +61,16 @@ type StopStreamEncryptionInput struct { // Amazon Resource Name (ARN) to either an alias or a key, or an alias name // prefixed by "alias/".You can also use a master key owned by Kinesis Data Streams // by specifying the alias aws/kinesis . + // // - Key ARN example: // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // // - Alias ARN example: arn:aws:kms:us-east-1:123456789012:alias/MyAliasName + // // - Globally unique key ID example: 12345678-1234-1234-1234-123456789012 + // // - Alias name example: alias/MyAliasName + // // - Master key owned by Kinesis Data Streams: alias/aws/kinesis // // This member is required. @@ -75,6 +86,7 @@ type StopStreamEncryptionInput struct { } func (in *StopStreamEncryptionInput) bindEndpointParams(p *EndpointParameters) { + p.StreamARN = in.StreamARN p.OperationType = ptr.String("control") } @@ -141,6 +153,12 @@ func (c *Client) addOperationStopStreamEncryptionMiddlewares(stack *middleware.S if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpStopStreamEncryptionValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_SubscribeToShard.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_SubscribeToShard.go index bf6323cf1c4..1b3767b33c1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_SubscribeToShard.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_SubscribeToShard.go @@ -17,22 +17,27 @@ import ( // specify in the ConsumerARN parameter and the shard you specify in the ShardId // parameter. After the connection is successfully established, Kinesis Data // Streams pushes records from the shard to the consumer over this connection. -// Before you call this operation, call RegisterStreamConsumer to register the -// consumer with Kinesis Data Streams. When the SubscribeToShard call succeeds, -// your consumer starts receiving events of type SubscribeToShardEvent over the -// HTTP/2 connection for up to 5 minutes, after which time you need to call -// SubscribeToShard again to renew the subscription if you want to continue to -// receive records. You can make one call to SubscribeToShard per second per -// registered consumer per shard. For example, if you have a 4000 shard stream and -// two registered stream consumers, you can make one SubscribeToShard request per -// second for each combination of shard and registered consumer, allowing you to -// subscribe both consumers to all 4000 shards in one second. If you call -// SubscribeToShard again with the same ConsumerARN and ShardId within 5 seconds -// of a successful call, you'll get a ResourceInUseException . If you call -// SubscribeToShard 5 seconds or more after a successful call, the second call -// takes over the subscription and the previous connection expires or fails with a -// ResourceInUseException . For an example of how to use this operations, see -// Enhanced Fan-Out Using the Kinesis Data Streams API . 
+// Before you call this operation, call RegisterStreamConsumerto register the consumer with Kinesis Data +// Streams. +// +// When the SubscribeToShard call succeeds, your consumer starts receiving events +// of type SubscribeToShardEventover the HTTP/2 connection for up to 5 minutes, after which time you +// need to call SubscribeToShard again to renew the subscription if you want to +// continue to receive records. +// +// You can make one call to SubscribeToShard per second per registered consumer +// per shard. For example, if you have a 4000 shard stream and two registered +// stream consumers, you can make one SubscribeToShard request per second for each +// combination of shard and registered consumer, allowing you to subscribe both +// consumers to all 4000 shards in one second. +// +// If you call SubscribeToShard again with the same ConsumerARN and ShardId within +// 5 seconds of a successful call, you'll get a ResourceInUseException . If you +// call SubscribeToShard 5 seconds or more after a successful call, the second +// call takes over the subscription and the previous connection expires or fails +// with a ResourceInUseException . +// +// For an example of how to use this operations, see Enhanced Fan-Out Using the Kinesis Data Streams API. func (c *Client) SubscribeToShard(ctx context.Context, params *SubscribeToShardInput, optFns ...func(*Options)) (*SubscribeToShardOutput, error) { if params == nil { params = &SubscribeToShardInput{} @@ -50,14 +55,13 @@ func (c *Client) SubscribeToShard(ctx context.Context, params *SubscribeToShardI type SubscribeToShardInput struct { - // For this parameter, use the value you obtained when you called - // RegisterStreamConsumer . + // For this parameter, use the value you obtained when you called RegisterStreamConsumer. // // This member is required. ConsumerARN *string // The ID of the shard you want to subscribe to. To see a list of all the shards - // for a given stream, use ListShards . + // for a given stream, use ListShards. // // This member is required. ShardId *string @@ -71,6 +75,7 @@ type SubscribeToShardInput struct { } func (in *SubscribeToShardInput) bindEndpointParams(p *EndpointParameters) { + p.ConsumerARN = in.ConsumerARN p.OperationType = ptr.String("data") } @@ -141,6 +146,12 @@ func (c *Client) addOperationSubscribeToShardMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpSubscribeToShardValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_UpdateShardCount.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_UpdateShardCount.go index 1a729fd43b3..63c991c6ffe 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_UpdateShardCount.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_UpdateShardCount.go @@ -14,37 +14,53 @@ import ( // Updates the shard count of the specified stream to the specified number of // shards. This API is only supported for the data streams with the provisioned -// capacity mode. When invoking this API, you must use either the StreamARN or the -// StreamName parameter, or both. It is recommended that you use the StreamARN -// input parameter when you invoke this API. Updating the shard count is an -// asynchronous operation. 
Upon receiving the request, Kinesis Data Streams returns -// immediately and sets the status of the stream to UPDATING . After the update is -// complete, Kinesis Data Streams sets the status of the stream back to ACTIVE . -// Depending on the size of the stream, the scaling action could take a few minutes -// to complete. You can continue to read and write data to your stream while its -// status is UPDATING . To update the shard count, Kinesis Data Streams performs -// splits or merges on individual shards. This can cause short-lived shards to be -// created, in addition to the final shards. These short-lived shards count towards -// your total shard limit for your account in the Region. When using this -// operation, we recommend that you specify a target shard count that is a multiple -// of 25% (25%, 50%, 75%, 100%). You can specify any target value within your shard -// limit. However, if you specify a target that isn't a multiple of 25%, the -// scaling action might take longer to complete. This operation has the following -// default limits. By default, you cannot do the following: +// capacity mode. +// +// When invoking this API, you must use either the StreamARN or the StreamName +// parameter, or both. It is recommended that you use the StreamARN input +// parameter when you invoke this API. +// +// Updating the shard count is an asynchronous operation. Upon receiving the +// request, Kinesis Data Streams returns immediately and sets the status of the +// stream to UPDATING . After the update is complete, Kinesis Data Streams sets the +// status of the stream back to ACTIVE . Depending on the size of the stream, the +// scaling action could take a few minutes to complete. You can continue to read +// and write data to your stream while its status is UPDATING . +// +// To update the shard count, Kinesis Data Streams performs splits or merges on +// individual shards. This can cause short-lived shards to be created, in addition +// to the final shards. These short-lived shards count towards your total shard +// limit for your account in the Region. +// +// When using this operation, we recommend that you specify a target shard count +// that is a multiple of 25% (25%, 50%, 75%, 100%). You can specify any target +// value within your shard limit. However, if you specify a target that isn't a +// multiple of 25%, the scaling action might take longer to complete. +// +// This operation has the following default limits. By default, you cannot do the +// following: +// // - Scale more than ten times per rolling 24-hour period per stream +// // - Scale up to more than double your current shard count for a stream +// // - Scale down below half your current shard count for a stream +// // - Scale up to more than 10000 shards in a stream +// // - Scale a stream with more than 10000 shards down unless the result is less // than 10000 shards +// // - Scale up to more than the shard limit for your account +// // - Make over 10 TPS. TPS over 10 will trigger the LimitExceededException // -// For the default limits for an Amazon Web Services account, see Streams Limits (https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html) -// in the Amazon Kinesis Data Streams Developer Guide. To request an increase in -// the call rate limit, the shard limit for this API, or your overall shard limit, -// use the limits form (https://console.aws.amazon.com/support/v1#/case/create?issueType=service-limit-increase&limitType=service-code-kinesis) -// . 
+// For the default limits for an Amazon Web Services account, see [Streams Limits] in the Amazon +// Kinesis Data Streams Developer Guide. To request an increase in the call rate +// limit, the shard limit for this API, or your overall shard limit, use the [limits form]. +// +// [limits form]: https://console.aws.amazon.com/support/v1#/case/create?issueType=service-limit-increase&limitType=service-code-kinesis +// [Streams Limits]: https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html func (c *Client) UpdateShardCount(ctx context.Context, params *UpdateShardCountInput, optFns ...func(*Options)) (*UpdateShardCountOutput, error) { if params == nil { params = &UpdateShardCountInput{} @@ -69,11 +85,15 @@ type UpdateShardCountInput struct { // The new number of shards. This value has the following default limits. By // default, you cannot do the following: + // // - Set this value to more than double your current shard count for a stream. + // // - Set this value below half your current shard count for a stream. + // // - Set this value to more than 10000 shards in a stream (the default limit for // shard count per stream is 10000 per account per region), unless you request a // limit increase. + // // - Scale a stream with more than 10000 shards down unless you set this value // to less than 10000 shards. // @@ -90,6 +110,7 @@ type UpdateShardCountInput struct { } func (in *UpdateShardCountInput) bindEndpointParams(p *EndpointParameters) { + p.StreamARN = in.StreamARN p.OperationType = ptr.String("control") } @@ -169,6 +190,12 @@ func (c *Client) addOperationUpdateShardCountMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpUpdateShardCountValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_UpdateStreamMode.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_UpdateStreamMode.go index d8e84b2249c..b932f3d1225 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_UpdateStreamMode.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_UpdateStreamMode.go @@ -12,7 +12,8 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Updates the capacity mode of the data stream. Currently, in Kinesis Data +// Updates the capacity mode of the data stream. Currently, in Kinesis Data +// // Streams, you can choose between an on-demand capacity mode and a provisioned // capacity mode for your data stream. func (c *Client) UpdateStreamMode(ctx context.Context, params *UpdateStreamModeInput, optFns ...func(*Options)) (*UpdateStreamModeOutput, error) { @@ -32,12 +33,12 @@ func (c *Client) UpdateStreamMode(ctx context.Context, params *UpdateStreamModeI type UpdateStreamModeInput struct { - // Specifies the ARN of the data stream whose capacity mode you want to update. + // Specifies the ARN of the data stream whose capacity mode you want to update. // // This member is required. StreamARN *string - // Specifies the capacity mode to which you want to set your data stream. + // Specifies the capacity mode to which you want to set your data stream. // Currently, in Kinesis Data Streams, you can choose between an on-demand capacity // mode and a provisioned capacity mode for your data streams. 
// @@ -48,6 +49,7 @@ type UpdateStreamModeInput struct { } func (in *UpdateStreamModeInput) bindEndpointParams(p *EndpointParameters) { + p.StreamARN = in.StreamARN p.OperationType = ptr.String("control") } @@ -114,6 +116,12 @@ func (c *Client) addOperationUpdateStreamModeMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpUpdateStreamModeValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/auth.go index 0702f8afad6..68fb5a61569 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/auth.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/auth.go @@ -12,7 +12,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -func bindAuthParamsRegion(params *AuthResolverParameters, _ interface{}, options Options) { +func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { params.Region = options.Region } @@ -90,12 +90,12 @@ type AuthResolverParameters struct { Region string } -func bindAuthResolverParams(operation string, input interface{}, options Options) *AuthResolverParameters { +func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { params := &AuthResolverParameters{ Operation: operation, } - bindAuthParamsRegion(params, input, options) + bindAuthParamsRegion(ctx, params, input, options) return params } @@ -145,7 +145,7 @@ func (*resolveAuthSchemeMiddleware) ID() string { func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - params := bindAuthResolverParams(m.operation, getOperationInput(ctx), m.options) + params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) if err != nil { return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/deserializers.go index 8b854e50aed..6dc4f468087 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/deserializers.go @@ -21,8 +21,17 @@ import ( "io" "io/ioutil" "strings" + "time" ) +func deserializeS3Expires(v string) (*time.Time, error) { + t, err := smithytime.ParseHTTPDate(v) + if err != nil { + return nil, nil + } + return &t, nil +} + type awsAwsjson11_deserializeOpAddTagsToStream struct { } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/doc.go index 079c71dc260..86c722d3d86 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/doc.go @@ -3,7 +3,8 @@ // Package kinesis provides the API client, operations, and parameter types for // Amazon Kinesis. 
// -// Amazon Kinesis Data Streams Service API Reference Amazon Kinesis Data Streams -// is a managed service that scales elastically for real-time processing of -// streaming big data. +// # Amazon Kinesis Data Streams Service API Reference +// +// Amazon Kinesis Data Streams is a managed service that scales elastically for +// real-time processing of streaming big data. package kinesis diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/endpoints.go index 23e6ea95efb..fc5e425ded5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/endpoints.go @@ -310,6 +310,17 @@ func (p EndpointParameters) WithDefaults() EndpointParameters { return p } +type stringSlice []string + +func (s stringSlice) Get(i int) *string { + if i < 0 || i >= len(s) { + return nil + } + + v := s[i] + return &v +} + // EndpointResolverV2 provides the interface for resolving service endpoints. type EndpointResolverV2 interface { // ResolveEndpoint attempts to resolve the endpoint with the provided options, @@ -1021,7 +1032,7 @@ type endpointParamsBinder interface { bindEndpointParams(*EndpointParameters) } -func bindEndpointParams(input interface{}, options Options) *EndpointParameters { +func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { params := &EndpointParameters{} params.Region = bindRegion(options.Region) @@ -1051,6 +1062,10 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return next.HandleFinalize(ctx, in) } + if err := checkAccountID(getIdentity(ctx), m.options.AccountIDEndpointMode); err != nil { + return out, metadata, fmt.Errorf("invalid accountID set: %w", err) + } + req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) @@ -1060,7 +1075,7 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") } - params := bindEndpointParams(getOperationInput(ctx), m.options) + params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/go_module_metadata.go index 8386cf3815d..76c9f66b64a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/go_module_metadata.go @@ -3,4 +3,4 @@ package kinesis // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.27.4" +const goModuleVersion = "1.29.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/options.go index 155ced3b14e..bd858f94a4c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/options.go @@ -24,6 +24,9 @@ type Options struct { // modify this list for per operation behavior. 
APIOptions []func(*middleware.Stack) error + // Indicates how aws account ID is applied in endpoint2.0 routing + AccountIDEndpointMode aws.AccountIDEndpointMode + // The optional application specific identifier appended to the User-Agent header. AppID string @@ -50,8 +53,10 @@ type Options struct { // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a // value for this field will likely prevent you from using any endpoint-related // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom - // endpoint, set the client option BaseEndpoint instead. + // BaseEndpoint. + // + // To migrate an EndpointResolver implementation that uses a custom endpoint, set + // the client option BaseEndpoint instead. EndpointResolver EndpointResolver // Resolves the endpoint used for a particular service operation. This should be @@ -70,17 +75,20 @@ type Options struct { // RetryMaxAttempts specifies the maximum number attempts an API client will call // an operation that fails with a retryable error. A value of 0 is ignored, and // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. If specified in an operation call's - // functional options with a value that is different than the constructed client's - // Options, the Client's Retryer will be wrapped to use the operation's specific - // RetryMaxAttempts value. + // per operation call's retry max attempts. + // + // If specified in an operation call's functional options with a value that is + // different than the constructed client's Options, the Client's Retryer will be + // wrapped to use the operation's specific RetryMaxAttempts value. RetryMaxAttempts int // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. Currently does not support per operation call - // overrides, may in the future. + // Retryer option is not also specified. + // + // When creating a new API Clients this member will only be used if the Retryer + // Options member is nil. This value will be ignored if Retryer is not nil. + // + // Currently does not support per operation call overrides, may in the future. RetryMode aws.RetryMode // Retryer guides how HTTP requests should be retried in case of recoverable @@ -97,8 +105,9 @@ type Options struct { // The initial DefaultsMode used when the client options were constructed. If the // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. Currently does not support per operation call - // overrides, may in the future. + // value was at that point in time. + // + // Currently does not support per operation call overrides, may in the future. resolvedDefaultsMode aws.DefaultsMode // The HTTP client to invoke API calls with. Defaults to client's default HTTP @@ -143,6 +152,7 @@ func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { // Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for // this field will likely prevent you from using any endpoint-related service // features released after the introduction of EndpointResolverV2 and BaseEndpoint. 
+// // To migrate an EndpointResolver implementation that uses a custom endpoint, set // the client option BaseEndpoint instead. func WithEndpointResolver(v EndpointResolver) func(*Options) { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/types/enums.go index 23fd058fe70..8722c62f40e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/types/enums.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/types/enums.go @@ -12,8 +12,9 @@ const ( ) // Values returns all known values for ConsumerStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ConsumerStatus) Values() []ConsumerStatus { return []ConsumerStatus{ "CREATING", @@ -31,8 +32,9 @@ const ( ) // Values returns all known values for EncryptionType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (EncryptionType) Values() []EncryptionType { return []EncryptionType{ "NONE", @@ -55,8 +57,9 @@ const ( ) // Values returns all known values for MetricsName. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (MetricsName) Values() []MetricsName { return []MetricsName{ "IncomingBytes", @@ -78,8 +81,9 @@ const ( ) // Values returns all known values for ScalingType. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ScalingType) Values() []ScalingType { return []ScalingType{ "UNIFORM_SCALING", @@ -99,8 +103,9 @@ const ( ) // Values returns all known values for ShardFilterType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ShardFilterType) Values() []ShardFilterType { return []ShardFilterType{ "AFTER_SHARD_ID", @@ -124,8 +129,9 @@ const ( ) // Values returns all known values for ShardIteratorType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
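The Options comments above explain how RetryMode and RetryMaxAttempts interact: client-level values come from the constructed Options, and a RetryMaxAttempts passed in an operation call's functional options that differs from the client's value wraps the client's Retryer for that call only. A minimal sketch of both levels, assuming illustrative region and attempt values:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kinesis"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion("us-east-1"))
	if err != nil {
		log.Fatal(err)
	}

	// Client-level settings: adaptive retry mode and a higher attempt ceiling.
	client := kinesis.NewFromConfig(cfg, func(o *kinesis.Options) {
		o.RetryMode = aws.RetryModeAdaptive
		o.RetryMaxAttempts = 5
	})

	// Per-operation override: a differing RetryMaxAttempts in the call's
	// functional options applies only to this call, as documented above.
	_, err = client.ListStreams(ctx, &kinesis.ListStreamsInput{}, func(o *kinesis.Options) {
		o.RetryMaxAttempts = 2
	})
	if err != nil {
		log.Println("ListStreams failed:", err)
	}
}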
func (ShardIteratorType) Values() []ShardIteratorType { return []ShardIteratorType{ "AT_SEQUENCE_NUMBER", @@ -145,8 +151,9 @@ const ( ) // Values returns all known values for StreamMode. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (StreamMode) Values() []StreamMode { return []StreamMode{ "PROVISIONED", @@ -165,8 +172,9 @@ const ( ) // Values returns all known values for StreamStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (StreamStatus) Values() []StreamStatus { return []StreamStatus{ "CREATING", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/types/errors.go index fcd07804d42..0df415d160c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/types/errors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/types/errors.go @@ -195,9 +195,10 @@ func (e *KMSDisabledException) ErrorCode() string { func (e *KMSDisabledException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // The request was rejected because the state of the specified resource isn't -// valid for this request. For more information, see How Key State Affects Use of -// a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Amazon Web Services Key Management Service Developer Guide. +// valid for this request. For more information, see [How Key State Affects Use of a Customer Master Key]in the Amazon Web Services +// Key Management Service Developer Guide. +// +// [How Key State Affects Use of a Customer Master Key]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html type KMSInvalidStateException struct { Message *string @@ -277,8 +278,10 @@ func (e *KMSOptInRequired) ErrorCode() string { func (e *KMSOptInRequired) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // The request was denied due to request throttling. For more information about -// throttling, see Limits (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html#requests-per-second) -// in the Amazon Web Services Key Management Service Developer Guide. +// throttling, see [Limits]in the Amazon Web Services Key Management Service Developer +// Guide. +// +// [Limits]: https://docs.aws.amazon.com/kms/latest/developerguide/limits.html#requests-per-second type KMSThrottlingException struct { Message *string @@ -333,10 +336,11 @@ func (e *LimitExceededException) ErrorFault() smithy.ErrorFault { return smithy. // The request rate for the stream is too high, or the requested data is too large // for the available throughput. Reduce the frequency or size of your requests. 
For -// more information, see Streams Limits (https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html) -// in the Amazon Kinesis Data Streams Developer Guide, and Error Retries and -// Exponential Backoff in Amazon Web Services (https://docs.aws.amazon.com/general/latest/gr/api-retries.html) +// more information, see [Streams Limits]in the Amazon Kinesis Data Streams Developer Guide, and [Error Retries and Exponential Backoff in Amazon Web Services] // in the Amazon Web Services General Reference. +// +// [Error Retries and Exponential Backoff in Amazon Web Services]: https://docs.aws.amazon.com/general/latest/gr/api-retries.html +// [Streams Limits]: https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html type ProvisionedThroughputExceededException struct { Message *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/types/types.go index c28c716ae71..309399c8a68 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/types/types.go @@ -31,14 +31,16 @@ type ChildShard struct { } // An object that represents the details of the consumer you registered. This type -// of object is returned by RegisterStreamConsumer . +// of object is returned by RegisterStreamConsumer. type Consumer struct { // When you register a consumer, Kinesis Data Streams generates an ARN for it. You - // need this ARN to be able to call SubscribeToShard . If you delete a consumer and - // then create a new one with the same name, it won't have the same ARN. That's - // because consumer ARNs contain the creation timestamp. This is important to keep - // in mind if you have IAM policies that reference consumer ARNs. + // need this ARN to be able to call SubscribeToShard. + // + // If you delete a consumer and then create a new one with the same name, it won't + // have the same ARN. That's because consumer ARNs contain the creation timestamp. + // This is important to keep in mind if you have IAM policies that reference + // consumer ARNs. // // This member is required. ConsumerARN *string @@ -62,14 +64,16 @@ type Consumer struct { } // An object that represents the details of a registered consumer. This type of -// object is returned by DescribeStreamConsumer . +// object is returned by DescribeStreamConsumer. type ConsumerDescription struct { // When you register a consumer, Kinesis Data Streams generates an ARN for it. You - // need this ARN to be able to call SubscribeToShard . If you delete a consumer and - // then create a new one with the same name, it won't have the same ARN. That's - // because consumer ARNs contain the creation timestamp. This is important to keep - // in mind if you have IAM policies that reference consumer ARNs. + // need this ARN to be able to call SubscribeToShard. + // + // If you delete a consumer and then create a new one with the same name, it won't + // have the same ARN. That's because consumer ARNs contain the creation timestamp. + // This is important to keep in mind if you have IAM policies that reference + // consumer ARNs. // // This member is required. ConsumerARN *string @@ -100,19 +104,30 @@ type ConsumerDescription struct { // Represents enhanced metrics types. type EnhancedMetrics struct { - // List of shard-level metrics. The following are the valid shard-level metrics. - // The value " ALL " enhances every metric. + // List of shard-level metrics. 
+ // + // The following are the valid shard-level metrics. The value " ALL " enhances + // every metric. + // // - IncomingBytes + // // - IncomingRecords + // // - OutgoingBytes + // // - OutgoingRecords + // // - WriteProvisionedThroughputExceeded + // // - ReadProvisionedThroughputExceeded + // // - IteratorAgeMilliseconds + // // - ALL - // For more information, see Monitoring the Amazon Kinesis Data Streams Service - // with Amazon CloudWatch (https://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html) - // in the Amazon Kinesis Data Streams Developer Guide. + // + // For more information, see [Monitoring the Amazon Kinesis Data Streams Service with Amazon CloudWatch] in the Amazon Kinesis Data Streams Developer Guide. + // + // [Monitoring the Amazon Kinesis Data Streams Service with Amazon CloudWatch]: https://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html ShardLevelMetrics []MetricsName noSmithyDocumentSerde @@ -218,7 +233,9 @@ type Record struct { // The encryption type used on the record. This parameter can be one of the // following values: + // // - NONE : Do not encrypt the records in the stream. + // // - KMS : Use server-side encryption on the records in the stream using a // customer-managed Amazon Web Services KMS key. EncryptionType EncryptionType @@ -273,19 +290,26 @@ type Shard struct { type ShardFilter struct { // The shard type specified in the ShardFilter parameter. This is a required - // property of the ShardFilter parameter. You can specify the following valid - // values: + // property of the ShardFilter parameter. + // + // You can specify the following valid values: + // // - AFTER_SHARD_ID - the response includes all the shards, starting with the // shard whose ID immediately follows the ShardId that you provided. + // // - AT_TRIM_HORIZON - the response includes all the shards that were open at // TRIM_HORIZON . + // // - FROM_TRIM_HORIZON - (default), the response includes all the shards within // the retention period of the data stream (trim to tip). + // // - AT_LATEST - the response includes only the currently open shards of the data // stream. + // // - AT_TIMESTAMP - the response includes all shards whose start timestamp is // less than or equal to the given timestamp and end timestamp is greater than or // equal to the given timestamp or still open. + // // - FROM_TIMESTAMP - the response incldues all closed shards whose end timestamp // is greater than or equal to the given timestamp and also all open shards. // Corrected to TRIM_HORIZON of the data stream if FROM_TIMESTAMP is less than @@ -311,14 +335,21 @@ type ShardFilter struct { type StartingPosition struct { // You can set the starting position to one of the following values: + // // AT_SEQUENCE_NUMBER : Start streaming from the position denoted by the sequence - // number specified in the SequenceNumber field. AFTER_SEQUENCE_NUMBER : Start - // streaming right after the position denoted by the sequence number specified in - // the SequenceNumber field. AT_TIMESTAMP : Start streaming from the position - // denoted by the time stamp specified in the Timestamp field. TRIM_HORIZON : Start - // streaming at the last untrimmed record in the shard, which is the oldest data - // record in the shard. LATEST : Start streaming just after the most recent record - // in the shard, so that you always read the most recent data in the shard. + // number specified in the SequenceNumber field. 
+ // + // AFTER_SEQUENCE_NUMBER : Start streaming right after the position denoted by the + // sequence number specified in the SequenceNumber field. + // + // AT_TIMESTAMP : Start streaming from the position denoted by the time stamp + // specified in the Timestamp field. + // + // TRIM_HORIZON : Start streaming at the last untrimmed record in the shard, which + // is the oldest data record in the shard. + // + // LATEST : Start streaming just after the most recent record in the shard, so that + // you always read the most recent data in the shard. // // This member is required. Type ShardIteratorType @@ -340,7 +371,7 @@ type StartingPosition struct { noSmithyDocumentSerde } -// Represents the output for DescribeStream . +// Represents the output for DescribeStream. type StreamDescription struct { // Represents the current enhanced monitoring settings of the stream. @@ -381,13 +412,17 @@ type StreamDescription struct { // The current status of the stream being described. The stream status is one of // the following states: + // // - CREATING - The stream is being created. Kinesis Data Streams immediately // returns and sets StreamStatus to CREATING . + // // - DELETING - The stream is being deleted. The specified stream is in the // DELETING state until Kinesis Data Streams completes the deletion. + // // - ACTIVE - The stream exists and is ready for read and write operations or // deletion. You should perform read and write operations only on an ACTIVE // stream. + // // - UPDATING - Shards in the stream are being merged or split. Read and write // operations continue to work while the stream is in the UPDATING state. // @@ -396,7 +431,9 @@ type StreamDescription struct { // The server-side encryption type used on the stream. This parameter can be one // of the following values: + // // - NONE : Do not encrypt the records in the stream. + // // - KMS : Use server-side encryption on the records in the stream using a // customer-managed Amazon Web Services KMS key. EncryptionType EncryptionType @@ -406,15 +443,20 @@ type StreamDescription struct { // ARN to either an alias or a key, or an alias name prefixed by "alias/".You can // also use a master key owned by Kinesis Data Streams by specifying the alias // aws/kinesis . + // // - Key ARN example: // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // // - Alias ARN example: arn:aws:kms:us-east-1:123456789012:alias/MyAliasName + // // - Globally unique key ID example: 12345678-1234-1234-1234-123456789012 + // // - Alias name example: alias/MyAliasName + // // - Master key owned by Kinesis Data Streams: alias/aws/kinesis KeyId *string - // Specifies the capacity mode to which you want to set your data stream. + // Specifies the capacity mode to which you want to set your data stream. // Currently, in Kinesis Data Streams, you can choose between an on-demand capacity // mode and a provisioned capacity mode for your data streams. StreamModeDetails *StreamModeDetails @@ -457,13 +499,17 @@ type StreamDescriptionSummary struct { // The current status of the stream being described. The stream status is one of // the following states: + // // - CREATING - The stream is being created. Kinesis Data Streams immediately // returns and sets StreamStatus to CREATING . + // // - DELETING - The stream is being deleted. The specified stream is in the // DELETING state until Kinesis Data Streams completes the deletion. + // // - ACTIVE - The stream exists and is ready for read and write operations or // deletion. 
You should perform read and write operations only on an ACTIVE // stream. + // // - UPDATING - Shards in the stream are being merged or split. Read and write // operations continue to work while the stream is in the UPDATING state. // @@ -474,7 +520,9 @@ type StreamDescriptionSummary struct { ConsumerCount *int32 // The encryption type used. This value is one of the following: + // // - KMS + // // - NONE EncryptionType EncryptionType @@ -483,15 +531,20 @@ type StreamDescriptionSummary struct { // ARN to either an alias or a key, or an alias name prefixed by "alias/".You can // also use a master key owned by Kinesis Data Streams by specifying the alias // aws/kinesis . + // // - Key ARN example: // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // // - Alias ARN example: arn:aws:kms:us-east-1:123456789012:alias/MyAliasName + // // - Globally unique key ID example: 12345678-1234-1234-1234-123456789012 + // // - Alias name example: alias/MyAliasName + // // - Master key owned by Kinesis Data Streams: alias/aws/kinesis KeyId *string - // Specifies the capacity mode to which you want to set your data stream. + // Specifies the capacity mode to which you want to set your data stream. // Currently, in Kinesis Data Streams, you can choose between an on-demand // ycapacity mode and a provisioned capacity mode for your data streams. StreamModeDetails *StreamModeDetails @@ -499,12 +552,13 @@ type StreamDescriptionSummary struct { noSmithyDocumentSerde } -// Specifies the capacity mode to which you want to set your data stream. +// Specifies the capacity mode to which you want to set your data stream. +// // Currently, in Kinesis Data Streams, you can choose between an on-demand capacity // mode and a provisioned capacity mode for your data streams. type StreamModeDetails struct { - // Specifies the capacity mode to which you want to set your data stream. + // Specifies the capacity mode to which you want to set your data stream. // Currently, in Kinesis Data Streams, you can choose between an on-demand capacity // mode and a provisioned capacity mode for your data streams. // @@ -535,7 +589,7 @@ type StreamSummary struct { // The timestamp at which the stream was created. StreamCreationTimestamp *time.Time - // Specifies the capacity mode to which you want to set your data stream. + // Specifies the capacity mode to which you want to set your data stream. // Currently, in Kinesis Data Streams, you can choose between an on-demand capacity // mode and a provisioned capacity mode for your data streams. StreamModeDetails *StreamModeDetails @@ -543,14 +597,14 @@ type StreamSummary struct { noSmithyDocumentSerde } -// After you call SubscribeToShard , Kinesis Data Streams sends events of this type -// over an HTTP/2 connection to your consumer. +// After you call SubscribeToShard, Kinesis Data Streams sends events of this type over an HTTP/2 +// connection to your consumer. type SubscribeToShardEvent struct { - // Use this as SequenceNumber in the next call to SubscribeToShard , with - // StartingPosition set to AT_SEQUENCE_NUMBER or AFTER_SEQUENCE_NUMBER . Use - // ContinuationSequenceNumber for checkpointing because it captures your shard - // progress even when no data is written to the shard. + // Use this as SequenceNumber in the next call to SubscribeToShard, with StartingPosition set to + // AT_SEQUENCE_NUMBER or AFTER_SEQUENCE_NUMBER . 
Use ContinuationSequenceNumber + // for checkpointing because it captures your shard progress even when no data is + // written to the shard. // // This member is required. ContinuationSequenceNumber *string @@ -576,7 +630,7 @@ type SubscribeToShardEvent struct { } // This is a tagged union for all of the types of events an enhanced fan-out -// consumer can receive over HTTP/2 after a call to SubscribeToShard . +// consumer can receive over HTTP/2 after a call to SubscribeToShard. // // The following types satisfy this interface: // @@ -585,9 +639,8 @@ type SubscribeToShardEventStream interface { isSubscribeToShardEventStream() } -// After you call SubscribeToShard , Kinesis Data Streams sends events of this type -// to your consumer. For an example of how to handle these events, see Enhanced -// Fan-Out Using the Kinesis Data Streams API . +// After you call SubscribeToShard, Kinesis Data Streams sends events of this type to your +// consumer. For an example of how to handle these events, see Enhanced Fan-Out Using the Kinesis Data Streams API. type SubscribeToShardEventStreamMemberSubscribeToShardEvent struct { Value SubscribeToShardEvent diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/CHANGELOG.md index 405ecde02ca..a54070f0a78 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/CHANGELOG.md @@ -1,3 +1,71 @@ +# v1.32.4 (2024-07-18) + +* **Documentation**: Doc only update for Secrets Manager + +# v1.32.3 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.2 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.1 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.0 (2024-06-26) + +* **Feature**: Support list-of-string endpoint parameter. + +# v1.31.1 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.31.0 (2024-06-18) + +* **Feature**: Track usage of various AWS SDK features in user-agent string. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.1 (2024-06-17) + +* **Documentation**: Doc only update for Secrets Manager +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.0 (2024-06-12) + +* **Feature**: Introducing RotationToken parameter for PutSecretValue API + +# v1.29.3 (2024-06-07) + +* **Bug Fix**: Add clock skew correction on all service clients +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.2 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.1 (2024-05-23) + +* No change notes available for this release. 
+ +# v1.29.0 (2024-05-20) + +* **Feature**: add v2 smoke tests and smithy smokeTests trait for SDK testing + +# v1.28.9 (2024-05-16) + +* **Documentation**: Documentation updates for AWS Secrets Manager +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.8 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.7 (2024-05-08) + +* **Bug Fix**: GoDoc improvement + # v1.28.6 (2024-03-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_client.go index 306c347d459..e56f08b9fa4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_client.go @@ -15,7 +15,9 @@ import ( internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" smithydocument "github.com/aws/smithy-go/document" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/middleware" @@ -23,6 +25,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" "net" "net/http" + "sync/atomic" "time" ) @@ -32,6 +35,9 @@ const ServiceAPIVersion = "2017-10-17" // Client provides the API client to make operations call for AWS Secrets Manager. type Client struct { options Options + + // Difference between the time reported by the server and the client + timeOffset *atomic.Int64 } // New returns an initialized Client based on the functional options. Provide @@ -72,6 +78,8 @@ func New(options Options, optFns ...func(*Options)) *Client { options: options, } + initializeTimeOffsetResolver(client) + return client } @@ -233,15 +241,16 @@ func setResolvedDefaultsMode(o *Options) { // NewFromConfig returns a new client from the provided config. 
func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + AccountIDEndpointMode: cfg.AccountIDEndpointMode, } resolveAWSRetryerProvider(cfg, &opts) resolveAWSRetryMaxAttempts(cfg, &opts) @@ -445,6 +454,30 @@ func addContentSHA256Header(stack *middleware.Stack) error { return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) } +func addIsWaiterUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) + return nil + }) +} + +func addIsPaginatorUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) + return nil + }) +} + func resolveIdempotencyTokenProvider(o *Options) { if o.IdempotencyTokenProvider != nil { return @@ -495,6 +528,63 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { return nil } +func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { + if mode == aws.AccountIDEndpointModeDisabled { + return nil + } + + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { + return aws.String(ca.Credentials.AccountID) + } + + return nil +} + +func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { + mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} + if err := stack.Build.Add(&mw, middleware.After); err != nil { + return err + } + return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) +} +func initializeTimeOffsetResolver(c *Client) { + c.timeOffset = new(atomic.Int64) +} + +func checkAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) error { + switch mode { + case aws.AccountIDEndpointModeUnset: + case aws.AccountIDEndpointModePreferred: + case aws.AccountIDEndpointModeDisabled: + case aws.AccountIDEndpointModeRequired: + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); !ok { + return fmt.Errorf("accountID is required but not set") + } else if ca.Credentials.AccountID == "" { + return fmt.Errorf("accountID is required but not set") + } + // default check in case invalid mode is configured through request config + default: + return fmt.Errorf("invalid accountID endpoint mode %s, must be preferred/required/disabled", mode) + } + + return nil +} + +func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + switch options.Retryer.(type) { + case *retry.Standard: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) + case *retry.AdaptiveMode: + 
ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) + } + return nil +} + // IdempotencyTokenProvider interface for providing idempotency token type IdempotencyTokenProvider interface { GetIdempotencyToken() (string, error) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_BatchGetSecretValue.go b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_BatchGetSecretValue.go index 8aa4456d280..9fe8033e647 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_BatchGetSecretValue.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_BatchGetSecretValue.go @@ -12,23 +12,27 @@ import ( ) // Retrieves the contents of the encrypted fields SecretString or SecretBinary for -// up to 20 secrets. To retrieve a single secret, call GetSecretValue . To choose -// which secrets to retrieve, you can specify a list of secrets by name or ARN, or -// you can use filters. If Secrets Manager encounters errors such as +// up to 20 secrets. To retrieve a single secret, call GetSecretValue. +// +// To choose which secrets to retrieve, you can specify a list of secrets by name +// or ARN, or you can use filters. If Secrets Manager encounters errors such as // AccessDeniedException while attempting to retrieve any of the secrets, you can -// see the errors in Errors in the response. Secrets Manager generates CloudTrail -// GetSecretValue log entries for each secret you request when you call this -// action. Do not include sensitive information in request parameters because it -// might be logged. For more information, see Logging Secrets Manager events with -// CloudTrail (https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html) -// . Required permissions: secretsmanager:BatchGetSecretValue , and you must have +// see the errors in Errors in the response. +// +// Secrets Manager generates CloudTrail GetSecretValue log entries for each secret +// you request when you call this action. Do not include sensitive information in +// request parameters because it might be logged. For more information, see [Logging Secrets Manager events with CloudTrail]. +// +// Required permissions: secretsmanager:BatchGetSecretValue , and you must have // secretsmanager:GetSecretValue for each secret. If you use filters, you must also // have secretsmanager:ListSecrets . If the secrets are encrypted using // customer-managed keys instead of the Amazon Web Services managed key // aws/secretsmanager , then you also need kms:Decrypt permissions for the keys. -// For more information, see IAM policy actions for Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions) -// and Authentication and access control in Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html) -// . +// For more information, see [IAM policy actions for Secrets Manager]and [Authentication and access control in Secrets Manager]. 
+// +// [Authentication and access control in Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html +// [Logging Secrets Manager events with CloudTrail]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html +// [IAM policy actions for Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions func (c *Client) BatchGetSecretValue(ctx context.Context, params *BatchGetSecretValueInput, optFns ...func(*Options)) (*BatchGetSecretValueOutput, error) { if params == nil { params = &BatchGetSecretValueInput{} @@ -50,10 +54,12 @@ type BatchGetSecretValueInput struct { // SecretIdList , but not both. Filters []types.Filter - // The number of results to include in the response. If there are more results - // available, in the response, Secrets Manager includes NextToken . To get the next - // results, call BatchGetSecretValue again with the value from NextToken . To use - // this parameter, you must also use the Filters parameter. + // The number of results to include in the response. + // + // If there are more results available, in the response, Secrets Manager includes + // NextToken . To get the next results, call BatchGetSecretValue again with the + // value from NextToken . To use this parameter, you must also use the Filters + // parameter. MaxResults *int32 // A token that indicates where the output should continue from, if a previous @@ -144,6 +150,12 @@ func (c *Client) addOperationBatchGetSecretValueMiddlewares(stack *middleware.St if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opBatchGetSecretValue(options.Region), middleware.Before); err != nil { return err } @@ -165,21 +177,15 @@ func (c *Client) addOperationBatchGetSecretValueMiddlewares(stack *middleware.St return nil } -// BatchGetSecretValueAPIClient is a client that implements the -// BatchGetSecretValue operation. -type BatchGetSecretValueAPIClient interface { - BatchGetSecretValue(context.Context, *BatchGetSecretValueInput, ...func(*Options)) (*BatchGetSecretValueOutput, error) -} - -var _ BatchGetSecretValueAPIClient = (*Client)(nil) - // BatchGetSecretValuePaginatorOptions is the paginator options for // BatchGetSecretValue type BatchGetSecretValuePaginatorOptions struct { - // The number of results to include in the response. If there are more results - // available, in the response, Secrets Manager includes NextToken . To get the next - // results, call BatchGetSecretValue again with the value from NextToken . To use - // this parameter, you must also use the Filters parameter. + // The number of results to include in the response. + // + // If there are more results available, in the response, Secrets Manager includes + // NextToken . To get the next results, call BatchGetSecretValue again with the + // value from NextToken . To use this parameter, you must also use the Filters + // parameter. Limit int32 // Set to true if pagination should stop if the service returns a pagination token @@ -240,6 +246,9 @@ func (p *BatchGetSecretValuePaginator) NextPage(ctx context.Context, optFns ...f } params.MaxResults = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) 
result, err := p.client.BatchGetSecretValue(ctx, ¶ms, optFns...) if err != nil { return nil, err @@ -259,6 +268,14 @@ func (p *BatchGetSecretValuePaginator) NextPage(ctx context.Context, optFns ...f return result, nil } +// BatchGetSecretValueAPIClient is a client that implements the +// BatchGetSecretValue operation. +type BatchGetSecretValueAPIClient interface { + BatchGetSecretValue(context.Context, *BatchGetSecretValueInput, ...func(*Options)) (*BatchGetSecretValueOutput, error) +} + +var _ BatchGetSecretValueAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opBatchGetSecretValue(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_CancelRotateSecret.go b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_CancelRotateSecret.go index 18edf0d2fde..1b0031cc5f9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_CancelRotateSecret.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_CancelRotateSecret.go @@ -11,22 +11,29 @@ import ( ) // Turns off automatic rotation, and if a rotation is currently in progress, -// cancels the rotation. If you cancel a rotation in progress, it can leave the -// VersionStage labels in an unexpected state. You might need to remove the staging -// label AWSPENDING from the partially created version. You also need to determine -// whether to roll back to the previous version of the secret by moving the staging -// label AWSCURRENT to the version that has AWSPENDING . To determine which version -// has a specific staging label, call ListSecretVersionIds . Then use -// UpdateSecretVersionStage to change staging labels. For more information, see -// How rotation works (https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotate-secrets_how.html) -// . To turn on automatic rotation again, call RotateSecret . Secrets Manager -// generates a CloudTrail log entry when you call this action. Do not include -// sensitive information in request parameters because it might be logged. For more -// information, see Logging Secrets Manager events with CloudTrail (https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html) -// . Required permissions: secretsmanager:CancelRotateSecret . For more -// information, see IAM policy actions for Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions) -// and Authentication and access control in Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html) -// . +// cancels the rotation. +// +// If you cancel a rotation in progress, it can leave the VersionStage labels in +// an unexpected state. You might need to remove the staging label AWSPENDING from +// the partially created version. You also need to determine whether to roll back +// to the previous version of the secret by moving the staging label AWSCURRENT to +// the version that has AWSPENDING . To determine which version has a specific +// staging label, call ListSecretVersionIds. Then use UpdateSecretVersionStage to change staging labels. For more information, +// see [How rotation works]. +// +// To turn on automatic rotation again, call RotateSecret. +// +// Secrets Manager generates a CloudTrail log entry when you call this action. 
Do +// not include sensitive information in request parameters because it might be +// logged. For more information, see [Logging Secrets Manager events with CloudTrail]. +// +// Required permissions: secretsmanager:CancelRotateSecret . For more information, +// see [IAM policy actions for Secrets Manager]and [Authentication and access control in Secrets Manager]. +// +// [Authentication and access control in Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html +// [Logging Secrets Manager events with CloudTrail]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html +// [How rotation works]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotate-secrets_how.html +// [IAM policy actions for Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions func (c *Client) CancelRotateSecret(ctx context.Context, params *CancelRotateSecretInput, optFns ...func(*Options)) (*CancelRotateSecretOutput, error) { if params == nil { params = &CancelRotateSecretInput{} @@ -44,9 +51,12 @@ func (c *Client) CancelRotateSecret(ctx context.Context, params *CancelRotateSec type CancelRotateSecretInput struct { - // The ARN or name of the secret. For an ARN, we recommend that you specify a - // complete ARN rather than a partial ARN. See Finding a secret from a partial ARN (https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot.html#ARN_secretnamehyphen) - // . + // The ARN or name of the secret. + // + // For an ARN, we recommend that you specify a complete ARN rather than a partial + // ARN. See [Finding a secret from a partial ARN]. + // + // [Finding a secret from a partial ARN]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot.html#ARN_secretnamehyphen // // This member is required. SecretId *string @@ -130,6 +140,12 @@ func (c *Client) addOperationCancelRotateSecretMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpCancelRotateSecretValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_CreateSecret.go b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_CreateSecret.go index 46d984e010b..3e651091672 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_CreateSecret.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_CreateSecret.go @@ -17,35 +17,55 @@ import ( // connection information to access a database or other service, which Secrets // Manager doesn't encrypt. A secret in Secrets Manager consists of both the // protected secret data and the important information needed to manage the secret. -// For secrets that use managed rotation, you need to create the secret through the -// managing service. For more information, see Secrets Manager secrets managed by -// other Amazon Web Services services (https://docs.aws.amazon.com/secretsmanager/latest/userguide/service-linked-secrets.html) -// . For information about creating a secret in the console, see Create a secret (https://docs.aws.amazon.com/secretsmanager/latest/userguide/manage_create-basic-secret.html) -// . 
To create a secret, you can provide the secret value to be encrypted in either +// +// For secrets that use managed rotation, you need to create the secret through +// the managing service. For more information, see [Secrets Manager secrets managed by other Amazon Web Services services]. +// +// For information about creating a secret in the console, see [Create a secret]. +// +// To create a secret, you can provide the secret value to be encrypted in either // the SecretString parameter or the SecretBinary parameter, but not both. If you // include SecretString or SecretBinary then Secrets Manager creates an initial // secret version and automatically attaches the staging label AWSCURRENT to it. +// // For database credentials you want to rotate, for Secrets Manager to be able to // rotate the secret, you must make sure the JSON you store in the SecretString -// matches the JSON structure of a database secret (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_secret_json_structure.html) -// . If you don't specify an KMS encryption key, Secrets Manager uses the Amazon -// Web Services managed key aws/secretsmanager . If this key doesn't already exist -// in your account, then Secrets Manager creates it for you automatically. All -// users and roles in the Amazon Web Services account automatically have access to -// use aws/secretsmanager . Creating aws/secretsmanager can result in a one-time -// significant delay in returning the result. If the secret is in a different -// Amazon Web Services account from the credentials calling the API, then you can't -// use aws/secretsmanager to encrypt the secret, and you must create and use a -// customer managed KMS key. Secrets Manager generates a CloudTrail log entry when -// you call this action. Do not include sensitive information in request parameters -// except SecretBinary or SecretString because it might be logged. For more -// information, see Logging Secrets Manager events with CloudTrail (https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html) -// . Required permissions: secretsmanager:CreateSecret . If you include tags in the -// secret, you also need secretsmanager:TagResource . For more information, see -// IAM policy actions for Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions) -// and Authentication and access control in Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html) -// . To encrypt the secret with a KMS key other than aws/secretsmanager , you need +// matches the [JSON structure of a database secret]. +// +// If you don't specify an KMS encryption key, Secrets Manager uses the Amazon Web +// Services managed key aws/secretsmanager . If this key doesn't already exist in +// your account, then Secrets Manager creates it for you automatically. All users +// and roles in the Amazon Web Services account automatically have access to use +// aws/secretsmanager . Creating aws/secretsmanager can result in a one-time +// significant delay in returning the result. +// +// If the secret is in a different Amazon Web Services account from the +// credentials calling the API, then you can't use aws/secretsmanager to encrypt +// the secret, and you must create and use a customer managed KMS key. +// +// Secrets Manager generates a CloudTrail log entry when you call this action. 
Do +// not include sensitive information in request parameters except SecretBinary or +// SecretString because it might be logged. For more information, see [Logging Secrets Manager events with CloudTrail]. +// +// Required permissions: secretsmanager:CreateSecret . If you include tags in the +// secret, you also need secretsmanager:TagResource . To add replica Regions, you +// must also have secretsmanager:ReplicateSecretToRegions . For more information, +// see [IAM policy actions for Secrets Manager]and [Authentication and access control in Secrets Manager]. +// +// To encrypt the secret with a KMS key other than aws/secretsmanager , you need // kms:GenerateDataKey and kms:Decrypt permission to the key. +// +// When you enter commands in a command shell, there is a risk of the command +// history being accessed or utilities having access to your command parameters. +// This is a concern if the command includes the value of a secret. Learn how to [Mitigate the risks of using command-line tools to store Secrets Manager secrets]. +// +// [Authentication and access control in Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html +// [Logging Secrets Manager events with CloudTrail]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html +// [Secrets Manager secrets managed by other Amazon Web Services services]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/service-linked-secrets.html +// [Mitigate the risks of using command-line tools to store Secrets Manager secrets]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/security_cli-exposure-risks.html +// [Create a secret]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/manage_create-basic-secret.html +// [IAM policy actions for Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions +// [JSON structure of a database secret]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_secret_json_structure.html func (c *Client) CreateSecret(ctx context.Context, params *CreateSecretInput, optFns ...func(*Options)) (*CreateSecretOutput, error) { if params == nil { params = &CreateSecretInput{} @@ -63,12 +83,15 @@ func (c *Client) CreateSecret(ctx context.Context, params *CreateSecretInput, op type CreateSecretInput struct { - // The name of the new secret. The secret name can contain ASCII letters, numbers, - // and the following characters: /_+=.@- Do not end your secret name with a hyphen - // followed by six characters. If you do so, you risk confusion and unexpected - // results when searching for a secret by partial ARN. Secrets Manager - // automatically adds a hyphen and six random characters after the secret name at - // the end of the ARN. + // The name of the new secret. + // + // The secret name can contain ASCII letters, numbers, and the following + // characters: /_+=.@- + // + // Do not end your secret name with a hyphen followed by six characters. If you do + // so, you risk confusion and unexpected results when searching for a secret by + // partial ARN. Secrets Manager automatically adds a hyphen and six random + // characters after the secret name at the end of the ARN. // // This member is required. 
Name *string @@ -78,26 +101,36 @@ type CreateSecretInput struct { // If you include SecretString or SecretBinary , then Secrets Manager creates an // initial version for the secret, and this parameter specifies the unique - // identifier for the new version. If you use the Amazon Web Services CLI or one of - // the Amazon Web Services SDKs to call this operation, then you can leave this - // parameter empty. The CLI or SDK generates a random UUID for you and includes it - // as the value for this parameter in the request. If you generate a raw HTTP - // request to the Secrets Manager service endpoint, then you must generate a - // ClientRequestToken and include it in the request. This value helps ensure - // idempotency. Secrets Manager uses this value to prevent the accidental creation - // of duplicate versions if there are failures and retries during a rotation. We - // recommend that you generate a UUID-type (https://wikipedia.org/wiki/Universally_unique_identifier) - // value to ensure uniqueness of your versions within the specified secret. + // identifier for the new version. + // + // If you use the Amazon Web Services CLI or one of the Amazon Web Services SDKs + // to call this operation, then you can leave this parameter empty. The CLI or SDK + // generates a random UUID for you and includes it as the value for this parameter + // in the request. + // + // If you generate a raw HTTP request to the Secrets Manager service endpoint, + // then you must generate a ClientRequestToken and include it in the request. + // + // This value helps ensure idempotency. Secrets Manager uses this value to prevent + // the accidental creation of duplicate versions if there are failures and retries + // during a rotation. We recommend that you generate a [UUID-type]value to ensure uniqueness + // of your versions within the specified secret. + // // - If the ClientRequestToken value isn't already associated with a version of // the secret then a new version of the secret is created. + // // - If a version with this value already exists and the version SecretString and // SecretBinary values are the same as those in the request, then the request is // ignored. + // // - If a version with this value already exists and that version's SecretString // and SecretBinary values are different from those in the request, then the - // request fails because you cannot modify an existing version. Instead, use - // PutSecretValue to create a new version. + // request fails because you cannot modify an existing version. Instead, use PutSecretValueto + // create a new version. + // // This value becomes the VersionId of the new version. + // + // [UUID-type]: https://wikipedia.org/wiki/Universally_unique_identifier ClientRequestToken *string // The description of the secret. @@ -109,50 +142,75 @@ type CreateSecretInput struct { // The ARN, key ID, or alias of the KMS key that Secrets Manager uses to encrypt // the secret value in the secret. An alias is always prefixed by alias/ , for - // example alias/aws/secretsmanager . For more information, see About aliases (https://docs.aws.amazon.com/kms/latest/developerguide/alias-about.html) - // . To use a KMS key in a different account, use the key ARN or the alias ARN. If - // you don't specify this value, then Secrets Manager uses the key + // example alias/aws/secretsmanager . For more information, see [About aliases]. + // + // To use a KMS key in a different account, use the key ARN or the alias ARN. 
+ // + // If you don't specify this value, then Secrets Manager uses the key // aws/secretsmanager . If that key doesn't yet exist, then Secrets Manager creates - // it for you automatically the first time it encrypts the secret value. If the - // secret is in a different Amazon Web Services account from the credentials - // calling the API, then you can't use aws/secretsmanager to encrypt the secret, - // and you must create and use a customer managed KMS key. + // it for you automatically the first time it encrypts the secret value. + // + // If the secret is in a different Amazon Web Services account from the + // credentials calling the API, then you can't use aws/secretsmanager to encrypt + // the secret, and you must create and use a customer managed KMS key. + // + // [About aliases]: https://docs.aws.amazon.com/kms/latest/developerguide/alias-about.html KmsKeyId *string // The binary data to encrypt and store in the new version of the secret. We // recommend that you store your binary data in a file and then pass the contents - // of the file as a parameter. Either SecretString or SecretBinary must have a - // value, but not both. This parameter is not available in the Secrets Manager - // console. + // of the file as a parameter. + // + // Either SecretString or SecretBinary must have a value, but not both. + // + // This parameter is not available in the Secrets Manager console. + // + // Sensitive: This field contains sensitive information, so the service does not + // include it in CloudTrail log entries. If you create your own log entries, you + // must also avoid logging the information in this field. SecretBinary []byte // The text data to encrypt and store in this new version of the secret. We // recommend you use a JSON structure of key/value pairs for your secret value. - // Either SecretString or SecretBinary must have a value, but not both. If you - // create a secret by using the Secrets Manager console then Secrets Manager puts - // the protected secret text in only the SecretString parameter. The Secrets - // Manager console stores the information as a JSON structure of key/value pairs - // that a Lambda rotation function can parse. + // + // Either SecretString or SecretBinary must have a value, but not both. + // + // If you create a secret by using the Secrets Manager console then Secrets + // Manager puts the protected secret text in only the SecretString parameter. The + // Secrets Manager console stores the information as a JSON structure of key/value + // pairs that a Lambda rotation function can parse. + // + // Sensitive: This field contains sensitive information, so the service does not + // include it in CloudTrail log entries. If you create your own log entries, you + // must also avoid logging the information in this field. SecretString *string // A list of tags to attach to the secret. Each tag is a key and value pair of // strings in a JSON text string, for example: - // [{"Key":"CostCenter","Value":"12345"},{"Key":"environment","Value":"production"}] + // + // [{"Key":"CostCenter","Value":"12345"},{"Key":"environment","Value":"production"}] + // // Secrets Manager tag key names are case sensitive. A tag with the key "ABC" is a - // different tag from one with key "abc". If you check tags in permissions policies - // as part of your security strategy, then adding or removing a tag can change - // permissions. 
If the completion of this operation would result in you losing your - // permissions for this secret, then Secrets Manager blocks the operation and - // returns an Access Denied error. For more information, see Control access to - // secrets using tags (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_examples.html#tag-secrets-abac) - // and Limit access to identities with tags that match secrets' tags (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_examples.html#auth-and-access_tags2) - // . For information about how to format a JSON parameter for the various command - // line tool environments, see Using JSON for Parameters (https://docs.aws.amazon.com/cli/latest/userguide/cli-using-param.html#cli-using-param-json) - // . If your command-line tool or SDK requires quotation marks around the - // parameter, you should use single quotes to avoid confusion with the double - // quotes required in the JSON text. For tag quotas and naming restrictions, see - // Service quotas for Tagging (https://docs.aws.amazon.com/general/latest/gr/arg.html#taged-reference-quotas) - // in the Amazon Web Services General Reference guide. + // different tag from one with key "abc". + // + // If you check tags in permissions policies as part of your security strategy, + // then adding or removing a tag can change permissions. If the completion of this + // operation would result in you losing your permissions for this secret, then + // Secrets Manager blocks the operation and returns an Access Denied error. For + // more information, see [Control access to secrets using tags]and [Limit access to identities with tags that match secrets' tags]. + // + // For information about how to format a JSON parameter for the various command + // line tool environments, see [Using JSON for Parameters]. If your command-line tool or SDK requires + // quotation marks around the parameter, you should use single quotes to avoid + // confusion with the double quotes required in the JSON text. + // + // For tag quotas and naming restrictions, see [Service quotas for Tagging] in the Amazon Web Services General + // Reference guide. + // + // [Limit access to identities with tags that match secrets' tags]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_examples.html#auth-and-access_tags2 + // [Using JSON for Parameters]: https://docs.aws.amazon.com/cli/latest/userguide/cli-using-param.html#cli-using-param-json + // [Service quotas for Tagging]: https://docs.aws.amazon.com/general/latest/gr/arg.html#taged-reference-quotas + // [Control access to secrets using tags]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_examples.html#tag-secrets-abac Tags []types.Tag noSmithyDocumentSerde @@ -170,9 +228,12 @@ type CreateSecretOutput struct { Name *string // A list of the replicas of this secret and their status: + // // - Failed , which indicates that the replica was not created. + // // - InProgress , which indicates that Secrets Manager is in the process of // creating the replica. + // // - InSync , which indicates that the replica was created. 
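The ClientRequestToken behaviour documented above (the SDK mints a UUID when you leave the field empty, and reusing the same token makes a retried CreateSecret call idempotent) is easiest to see from the caller's side. A minimal sketch, assuming default config loading, github.com/google/uuid, and a hypothetical secret name, none of which come from this diff:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/secretsmanager"
	"github.com/google/uuid"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := secretsmanager.NewFromConfig(cfg)

	// Mint the idempotency token once; resending the same token on a retry is
	// treated as a repeat of the original request rather than a new version.
	token := uuid.NewString()
	out, err := client.CreateSecret(ctx, &secretsmanager.CreateSecretInput{
		Name:               aws.String("example/app/db-credentials"), // hypothetical name
		SecretString:       aws.String(`{"username":"app","password":"example"}`),
		ClientRequestToken: aws.String(token),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created secret %s, version %s", aws.ToString(out.ARN), aws.ToString(out.VersionId))
}
```

If the first attempt times out and the call is retried with the same token and the same secret value, the request is treated as a duplicate of the original rather than creating an extra version, which is the behaviour the doc comment above describes.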
ReplicationStatus []types.ReplicationStatusType @@ -240,6 +301,12 @@ func (c *Client) addOperationCreateSecretMiddlewares(stack *middleware.Stack, op if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateSecretMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_DeleteResourcePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_DeleteResourcePolicy.go index 46e5b4798d5..dbe41c93db6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_DeleteResourcePolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_DeleteResourcePolicy.go @@ -11,14 +11,18 @@ import ( ) // Deletes the resource-based permission policy attached to the secret. To attach -// a policy to a secret, use PutResourcePolicy . Secrets Manager generates a -// CloudTrail log entry when you call this action. Do not include sensitive -// information in request parameters because it might be logged. For more -// information, see Logging Secrets Manager events with CloudTrail (https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html) -// . Required permissions: secretsmanager:DeleteResourcePolicy . For more -// information, see IAM policy actions for Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions) -// and Authentication and access control in Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html) -// . +// a policy to a secret, use PutResourcePolicy. +// +// Secrets Manager generates a CloudTrail log entry when you call this action. Do +// not include sensitive information in request parameters because it might be +// logged. For more information, see [Logging Secrets Manager events with CloudTrail]. +// +// Required permissions: secretsmanager:DeleteResourcePolicy . For more +// information, see [IAM policy actions for Secrets Manager]and [Authentication and access control in Secrets Manager]. +// +// [Authentication and access control in Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html +// [Logging Secrets Manager events with CloudTrail]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html +// [IAM policy actions for Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions func (c *Client) DeleteResourcePolicy(ctx context.Context, params *DeleteResourcePolicyInput, optFns ...func(*Options)) (*DeleteResourcePolicyOutput, error) { if params == nil { params = &DeleteResourcePolicyInput{} @@ -37,9 +41,11 @@ func (c *Client) DeleteResourcePolicy(ctx context.Context, params *DeleteResourc type DeleteResourcePolicyInput struct { // The ARN or name of the secret to delete the attached resource-based policy for. + // // For an ARN, we recommend that you specify a complete ARN rather than a partial - // ARN. See Finding a secret from a partial ARN (https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot.html#ARN_secretnamehyphen) - // . + // ARN. See [Finding a secret from a partial ARN]. 
+ // + // [Finding a secret from a partial ARN]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot.html#ARN_secretnamehyphen // // This member is required. SecretId *string @@ -116,6 +122,12 @@ func (c *Client) addOperationDeleteResourcePolicyMiddlewares(stack *middleware.S if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDeleteResourcePolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_DeleteSecret.go b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_DeleteSecret.go index f34999b0681..f7c84bf8c55 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_DeleteSecret.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_DeleteSecret.go @@ -15,31 +15,41 @@ import ( // during which you can restore the secret. The minimum recovery window is 7 days. // The default recovery window is 30 days. Secrets Manager attaches a DeletionDate // stamp to the secret that specifies the end of the recovery window. At the end of -// the recovery window, Secrets Manager deletes the secret permanently. You can't -// delete a primary secret that is replicated to other Regions. You must first -// delete the replicas using RemoveRegionsFromReplication , and then delete the -// primary secret. When you delete a replica, it is deleted immediately. You can't -// directly delete a version of a secret. Instead, you remove all staging labels -// from the version using UpdateSecretVersionStage . This marks the version as -// deprecated, and then Secrets Manager can automatically delete the version in the -// background. To determine whether an application still uses a secret, you can -// create an Amazon CloudWatch alarm to alert you to any attempts to access a -// secret during the recovery window. For more information, see Monitor secrets -// scheduled for deletion (https://docs.aws.amazon.com/secretsmanager/latest/userguide/monitoring_cloudwatch_deleted-secrets.html) -// . Secrets Manager performs the permanent secret deletion at the end of the +// the recovery window, Secrets Manager deletes the secret permanently. +// +// You can't delete a primary secret that is replicated to other Regions. You must +// first delete the replicas using RemoveRegionsFromReplication, and then delete the primary secret. When you +// delete a replica, it is deleted immediately. +// +// You can't directly delete a version of a secret. Instead, you remove all +// staging labels from the version using UpdateSecretVersionStage. This marks the version as deprecated, +// and then Secrets Manager can automatically delete the version in the background. +// +// To determine whether an application still uses a secret, you can create an +// Amazon CloudWatch alarm to alert you to any attempts to access a secret during +// the recovery window. For more information, see [Monitor secrets scheduled for deletion]. +// +// Secrets Manager performs the permanent secret deletion at the end of the // waiting period as a background task with low priority. There is no guarantee of -// a specific time after the recovery window for the permanent delete to occur. 
At -// any time before recovery window ends, you can use RestoreSecret to remove the -// DeletionDate and cancel the deletion of the secret. When a secret is scheduled -// for deletion, you cannot retrieve the secret value. You must first cancel the -// deletion with RestoreSecret and then you can retrieve the secret. Secrets -// Manager generates a CloudTrail log entry when you call this action. Do not -// include sensitive information in request parameters because it might be logged. -// For more information, see Logging Secrets Manager events with CloudTrail (https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html) -// . Required permissions: secretsmanager:DeleteSecret . For more information, see -// IAM policy actions for Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions) -// and Authentication and access control in Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html) -// . +// a specific time after the recovery window for the permanent delete to occur. +// +// At any time before recovery window ends, you can use RestoreSecret to remove the DeletionDate +// and cancel the deletion of the secret. +// +// When a secret is scheduled for deletion, you cannot retrieve the secret value. +// You must first cancel the deletion with RestoreSecretand then you can retrieve the secret. +// +// Secrets Manager generates a CloudTrail log entry when you call this action. Do +// not include sensitive information in request parameters because it might be +// logged. For more information, see [Logging Secrets Manager events with CloudTrail]. +// +// Required permissions: secretsmanager:DeleteSecret . For more information, see [IAM policy actions for Secrets Manager] +// and [Authentication and access control in Secrets Manager]. +// +// [Authentication and access control in Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html +// [Logging Secrets Manager events with CloudTrail]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html +// [Monitor secrets scheduled for deletion]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/monitoring_cloudwatch_deleted-secrets.html +// [IAM policy actions for Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions func (c *Client) DeleteSecret(ctx context.Context, params *DeleteSecretInput, optFns ...func(*Options)) (*DeleteSecretOutput, error) { if params == nil { params = &DeleteSecretInput{} @@ -57,10 +67,12 @@ func (c *Client) DeleteSecret(ctx context.Context, params *DeleteSecretInput, op type DeleteSecretInput struct { - // The ARN or name of the secret to delete. For an ARN, we recommend that you - // specify a complete ARN rather than a partial ARN. See Finding a secret from a - // partial ARN (https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot.html#ARN_secretnamehyphen) - // . + // The ARN or name of the secret to delete. + // + // For an ARN, we recommend that you specify a complete ARN rather than a partial + // ARN. See [Finding a secret from a partial ARN]. + // + // [Finding a secret from a partial ARN]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot.html#ARN_secretnamehyphen // // This member is required. 
SecretId *string @@ -68,17 +80,20 @@ type DeleteSecretInput struct { // Specifies whether to delete the secret without any recovery window. You can't // use both this parameter and RecoveryWindowInDays in the same call. If you don't // use either, then by default Secrets Manager uses a 30 day recovery window. + // // Secrets Manager performs the actual deletion with an asynchronous background // process, so there might be a short delay before the secret is permanently // deleted. If you delete a secret and then immediately create a secret with the - // same name, use appropriate back off and retry logic. If you forcibly delete an - // already deleted or nonexistent secret, the operation does not return - // ResourceNotFoundException . Use this parameter with caution. This parameter - // causes the operation to skip the normal recovery window before the permanent - // deletion that Secrets Manager would normally impose with the - // RecoveryWindowInDays parameter. If you delete a secret with the - // ForceDeleteWithoutRecovery parameter, then you have no opportunity to recover - // the secret. You lose the secret permanently. + // same name, use appropriate back off and retry logic. + // + // If you forcibly delete an already deleted or nonexistent secret, the operation + // does not return ResourceNotFoundException . + // + // Use this parameter with caution. This parameter causes the operation to skip + // the normal recovery window before the permanent deletion that Secrets Manager + // would normally impose with the RecoveryWindowInDays parameter. If you delete a + // secret with the ForceDeleteWithoutRecovery parameter, then you have no + // opportunity to recover the secret. You lose the secret permanently. ForceDeleteWithoutRecovery *bool // The number of days from 7 to 30 that Secrets Manager waits before permanently @@ -164,6 +179,12 @@ func (c *Client) addOperationDeleteSecretMiddlewares(stack *middleware.Stack, op if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDeleteSecretValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_DescribeSecret.go b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_DescribeSecret.go index 0eb7526cc4a..913b7fee27b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_DescribeSecret.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_DescribeSecret.go @@ -14,13 +14,17 @@ import ( // Retrieves the details of a secret. It does not include the encrypted secret // value. Secrets Manager only returns fields that have a value in the response. +// // Secrets Manager generates a CloudTrail log entry when you call this action. Do // not include sensitive information in request parameters because it might be -// logged. For more information, see Logging Secrets Manager events with CloudTrail (https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html) -// . Required permissions: secretsmanager:DescribeSecret . 
For more information, -// see IAM policy actions for Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions) -// and Authentication and access control in Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html) -// . +// logged. For more information, see [Logging Secrets Manager events with CloudTrail]. +// +// Required permissions: secretsmanager:DescribeSecret . For more information, see [IAM policy actions for Secrets Manager] +// and [Authentication and access control in Secrets Manager]. +// +// [Authentication and access control in Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html +// [Logging Secrets Manager events with CloudTrail]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html +// [IAM policy actions for Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions func (c *Client) DescribeSecret(ctx context.Context, params *DescribeSecretInput, optFns ...func(*Options)) (*DescribeSecretOutput, error) { if params == nil { params = &DescribeSecretInput{} @@ -38,9 +42,12 @@ func (c *Client) DescribeSecret(ctx context.Context, params *DescribeSecretInput type DescribeSecretInput struct { - // The ARN or name of the secret. For an ARN, we recommend that you specify a - // complete ARN rather than a partial ARN. See Finding a secret from a partial ARN (https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot.html#ARN_secretnamehyphen) - // . + // The ARN or name of the secret. + // + // For an ARN, we recommend that you specify a complete ARN rather than a partial + // ARN. See [Finding a secret from a partial ARN]. + // + // [Finding a secret from a partial ARN]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot.html#ARN_secretnamehyphen // // This member is required. SecretId *string @@ -60,9 +67,11 @@ type DescribeSecretOutput struct { // deletion, this field is omitted. When you delete a secret, Secrets Manager // requires a recovery window of at least 7 days before deleting the secret. Some // time after the deleted date, Secrets Manager deletes the secret, including all - // of its versions. If a secret is scheduled for deletion, then its details, - // including the encrypted secret value, is not accessible. To cancel a scheduled - // deletion and restore access to the secret, use RestoreSecret . + // of its versions. + // + // If a secret is scheduled for deletion, then its details, including the + // encrypted secret value, is not accessible. To cancel a scheduled deletion and + // restore access to the secret, use RestoreSecret. DeletedDate *time.Time // The description of the secret. @@ -93,15 +102,17 @@ type DescribeSecretOutput struct { // isn't configured for rotation or rotation has been disabled, Secrets Manager // returns null. If rotation fails, Secrets Manager retries the entire rotation // process multiple times. If rotation is unsuccessful, this date may be in the - // past. This date represents the latest date that rotation will occur, but it is - // not an approximate rotation date. In some cases, for example if you turn off - // automatic rotation and then turn it back on, the next rotation may occur much - // sooner than this date. + // past. 
+ // + // This date represents the latest date that rotation will occur, but it is not an + // approximate rotation date. In some cases, for example if you turn off automatic + // rotation and then turn it back on, the next rotation may occur much sooner than + // this date. NextRotationDate *time.Time - // The ID of the service that created this secret. For more information, see - // Secrets managed by other Amazon Web Services services (https://docs.aws.amazon.com/secretsmanager/latest/userguide/service-linked-secrets.html) - // . + // The ID of the service that created this secret. For more information, see [Secrets managed by other Amazon Web Services services]. + // + // [Secrets managed by other Amazon Web Services services]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/service-linked-secrets.html OwningService *string // The Region the secret is in. If a secret is replicated to other Regions, the @@ -109,14 +120,19 @@ type DescribeSecretOutput struct { PrimaryRegion *string // A list of the replicas of this secret and their status: + // // - Failed , which indicates that the replica was not created. + // // - InProgress , which indicates that Secrets Manager is in the process of // creating the replica. + // // - InSync , which indicates that the replica was created. ReplicationStatus []types.ReplicationStatusType - // Specifies whether automatic rotation is turned on for this secret. To turn on - // rotation, use RotateSecret . To turn off rotation, use CancelRotateSecret . + // Specifies whether automatic rotation is turned on for this secret. If the + // secret has never been configured for rotation, Secrets Manager returns null. + // + // To turn on rotation, use RotateSecret. To turn off rotation, use CancelRotateSecret. RotationEnabled *bool // The ARN of the Lambda function that Secrets Manager invokes to rotate the @@ -129,25 +145,32 @@ type DescribeSecretOutput struct { // rotation turned on, this field is omitted. RotationRules *types.RotationRulesType - // The list of tags attached to the secret. To add tags to a secret, use - // TagResource . To remove tags, use UntagResource . + // The list of tags attached to the secret. To add tags to a secret, use TagResource. To + // remove tags, use UntagResource. Tags []types.Tag // A list of the versions of the secret that have staging labels attached. // Versions that don't have staging labels are considered deprecated and Secrets - // Manager can delete them. Secrets Manager uses staging labels to indicate the - // status of a secret version during rotation. The three staging labels for - // rotation are: + // Manager can delete them. + // + // Secrets Manager uses staging labels to indicate the status of a secret version + // during rotation. The three staging labels for rotation are: + // // - AWSCURRENT , which indicates the current version of the secret. + // // - AWSPENDING , which indicates the version of the secret that contains new // secret information that will become the next current version when rotation - // finishes. During rotation, Secrets Manager creates an AWSPENDING version ID - // before creating the new secret version. To check if a secret version exists, - // call GetSecretValue . + // finishes. + // + // During rotation, Secrets Manager creates an AWSPENDING version ID before + // creating the new secret version. To check if a secret version exists, call GetSecretValue. + // // - AWSPREVIOUS , which indicates the previous current version of the secret. 
// You can use this as the last known good version. - // For more information about rotation and staging labels, see How rotation works (https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotate-secrets_how.html) - // . + // + // For more information about rotation and staging labels, see [How rotation works]. + // + // [How rotation works]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotate-secrets_how.html VersionIdsToStages map[string][]string // Metadata pertaining to the operation's result. @@ -211,6 +234,12 @@ func (c *Client) addOperationDescribeSecretMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDescribeSecretValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_GetRandomPassword.go b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_GetRandomPassword.go index f310b6e0a7e..50a1610e167 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_GetRandomPassword.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_GetRandomPassword.go @@ -14,12 +14,15 @@ import ( // and include every character type that the system you are generating a password // for can support. By default, Secrets Manager uses uppercase and lowercase // letters, numbers, and the following characters in passwords: -// !\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ Secrets Manager generates a CloudTrail log -// entry when you call this action. Required permissions: -// secretsmanager:GetRandomPassword . For more information, see IAM policy -// actions for Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions) -// and Authentication and access control in Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html) -// . +// !\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ +// +// Secrets Manager generates a CloudTrail log entry when you call this action. +// +// Required permissions: secretsmanager:GetRandomPassword . For more information, +// see [IAM policy actions for Secrets Manager]and [Authentication and access control in Secrets Manager]. 
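// Illustrative sketch of DescribeSecret and the staging labels documented above:
// checking deletion/rotation state and locating the version carrying AWSCURRENT via
// VersionIdsToStages. Assumes the same client and imports as the earlier sketches;
// the secret name is a placeholder.
func describeRotation(ctx context.Context, client *secretsmanager.Client, name string) error {
	out, err := client.DescribeSecret(ctx, &secretsmanager.DescribeSecretInput{
		SecretId: aws.String(name),
	})
	if err != nil {
		return err
	}
	if out.DeletedDate != nil {
		log.Printf("scheduled for deletion since %v; value is inaccessible until restored", out.DeletedDate)
	}
	log.Printf("rotation enabled: %v", aws.ToBool(out.RotationEnabled))
	for versionID, stages := range out.VersionIdsToStages {
		for _, stage := range stages {
			if stage == "AWSCURRENT" {
				log.Printf("current version: %s", versionID)
			}
		}
	}
	return nil
}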
+// +// [Authentication and access control in Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html +// [IAM policy actions for Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions func (c *Client) GetRandomPassword(ctx context.Context, params *GetRandomPasswordInput, optFns ...func(*Options)) (*GetRandomPasswordOutput, error) { if params == nil { params = &GetRandomPasswordInput{} @@ -139,6 +142,12 @@ func (c *Client) addOperationGetRandomPasswordMiddlewares(stack *middleware.Stac if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetRandomPassword(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_GetResourcePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_GetResourcePolicy.go index ce67c7e1dcc..1dd0379d110 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_GetResourcePolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_GetResourcePolicy.go @@ -12,14 +12,19 @@ import ( // Retrieves the JSON text of the resource-based policy document attached to the // secret. For more information about permissions policies attached to a secret, -// see Permissions policies attached to a secret (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_resource-policies.html) -// . Secrets Manager generates a CloudTrail log entry when you call this action. Do +// see [Permissions policies attached to a secret]. +// +// Secrets Manager generates a CloudTrail log entry when you call this action. Do // not include sensitive information in request parameters because it might be -// logged. For more information, see Logging Secrets Manager events with CloudTrail (https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html) -// . Required permissions: secretsmanager:GetResourcePolicy . For more information, -// see IAM policy actions for Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions) -// and Authentication and access control in Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html) -// . +// logged. For more information, see [Logging Secrets Manager events with CloudTrail]. +// +// Required permissions: secretsmanager:GetResourcePolicy . For more information, +// see [IAM policy actions for Secrets Manager]and [Authentication and access control in Secrets Manager]. 
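// Illustrative sketch of GetRandomPassword as documented above. The PasswordLength and
// ExcludePunctuation fields are assumed from the GetRandomPasswordInput shape; same
// client and imports as the earlier sketches.
func newRandomPassword(ctx context.Context, client *secretsmanager.Client) (string, error) {
	out, err := client.GetRandomPassword(ctx, &secretsmanager.GetRandomPasswordInput{
		PasswordLength:     aws.Int64(32),
		ExcludePunctuation: aws.Bool(true), // keep only letters and digits
	})
	if err != nil {
		return "", err
	}
	return aws.ToString(out.RandomPassword), nil
}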
+// +// [Authentication and access control in Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html +// [Logging Secrets Manager events with CloudTrail]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html +// [IAM policy actions for Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions +// [Permissions policies attached to a secret]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_resource-policies.html func (c *Client) GetResourcePolicy(ctx context.Context, params *GetResourcePolicyInput, optFns ...func(*Options)) (*GetResourcePolicyOutput, error) { if params == nil { params = &GetResourcePolicyInput{} @@ -38,9 +43,12 @@ func (c *Client) GetResourcePolicy(ctx context.Context, params *GetResourcePolic type GetResourcePolicyInput struct { // The ARN or name of the secret to retrieve the attached resource-based policy - // for. For an ARN, we recommend that you specify a complete ARN rather than a - // partial ARN. See Finding a secret from a partial ARN (https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot.html#ARN_secretnamehyphen) - // . + // for. + // + // For an ARN, we recommend that you specify a complete ARN rather than a partial + // ARN. See [Finding a secret from a partial ARN]. + // + // [Finding a secret from a partial ARN]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot.html#ARN_secretnamehyphen // // This member is required. SecretId *string @@ -57,9 +65,9 @@ type GetResourcePolicyOutput struct { Name *string // A JSON-formatted string that contains the permissions policy attached to the - // secret. For more information about permissions policies, see Authentication and - // access control for Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html) - // . + // secret. For more information about permissions policies, see [Authentication and access control for Secrets Manager]. + // + // [Authentication and access control for Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html ResourcePolicy *string // Metadata pertaining to the operation's result. @@ -123,6 +131,12 @@ func (c *Client) addOperationGetResourcePolicyMiddlewares(stack *middleware.Stac if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpGetResourcePolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_GetSecretValue.go b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_GetSecretValue.go index 5f099252bfc..16b70454ace 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_GetSecretValue.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_GetSecretValue.go @@ -12,23 +12,31 @@ import ( ) // Retrieves the contents of the encrypted fields SecretString or SecretBinary -// from the specified version of a secret, whichever contains content. To retrieve -// the values for a group of secrets, call BatchGetSecretValue . We recommend that -// you cache your secret values by using client-side caching. Caching secrets -// improves speed and reduces your costs. 
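// Illustrative sketch of GetResourcePolicy as documented above: fetching the
// resource-based policy attached to a secret. Assumes the same client and imports as
// the earlier sketches; a nil ResourcePolicy simply means no policy is attached.
func printResourcePolicy(ctx context.Context, client *secretsmanager.Client, name string) error {
	out, err := client.GetResourcePolicy(ctx, &secretsmanager.GetResourcePolicyInput{
		SecretId: aws.String(name),
	})
	if err != nil {
		return err
	}
	if out.ResourcePolicy == nil {
		log.Printf("no resource policy attached to %s", aws.ToString(out.Name))
		return nil
	}
	log.Printf("policy for %s: %s", aws.ToString(out.Name), aws.ToString(out.ResourcePolicy))
	return nil
}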
For more information, see Cache secrets -// for your applications (https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieving-secrets.html) -// . To retrieve the previous version of a secret, use VersionStage and specify -// AWSPREVIOUS. To revert to the previous version of a secret, call -// UpdateSecretVersionStage (https://docs.aws.amazon.com/cli/latest/reference/secretsmanager/update-secret-version-stage.html) -// . Secrets Manager generates a CloudTrail log entry when you call this action. Do +// from the specified version of a secret, whichever contains content. +// +// To retrieve the values for a group of secrets, call BatchGetSecretValue. +// +// We recommend that you cache your secret values by using client-side caching. +// Caching secrets improves speed and reduces your costs. For more information, see +// [Cache secrets for your applications]. +// +// To retrieve the previous version of a secret, use VersionStage and specify +// AWSPREVIOUS. To revert to the previous version of a secret, call [UpdateSecretVersionStage]. +// +// Secrets Manager generates a CloudTrail log entry when you call this action. Do // not include sensitive information in request parameters because it might be -// logged. For more information, see Logging Secrets Manager events with CloudTrail (https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html) -// . Required permissions: secretsmanager:GetSecretValue . If the secret is -// encrypted using a customer-managed key instead of the Amazon Web Services -// managed key aws/secretsmanager , then you also need kms:Decrypt permissions for -// that key. For more information, see IAM policy actions for Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions) -// and Authentication and access control in Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html) -// . +// logged. For more information, see [Logging Secrets Manager events with CloudTrail]. +// +// Required permissions: secretsmanager:GetSecretValue . If the secret is encrypted +// using a customer-managed key instead of the Amazon Web Services managed key +// aws/secretsmanager , then you also need kms:Decrypt permissions for that key. +// For more information, see [IAM policy actions for Secrets Manager]and [Authentication and access control in Secrets Manager]. +// +// [Authentication and access control in Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html +// [Logging Secrets Manager events with CloudTrail]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html +// [UpdateSecretVersionStage]: https://docs.aws.amazon.com/cli/latest/reference/secretsmanager/update-secret-version-stage.html +// [IAM policy actions for Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions +// [Cache secrets for your applications]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieving-secrets.html func (c *Client) GetSecretValue(ctx context.Context, params *GetSecretValueInput, optFns ...func(*Options)) (*GetSecretValueOutput, error) { if params == nil { params = &GetSecretValueInput{} @@ -46,10 +54,13 @@ func (c *Client) GetSecretValue(ctx context.Context, params *GetSecretValueInput type GetSecretValueInput struct { - // The ARN or name of the secret to retrieve. 
For an ARN, we recommend that you - // specify a complete ARN rather than a partial ARN. See Finding a secret from a - // partial ARN (https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot.html#ARN_secretnamehyphen) - // . + // The ARN or name of the secret to retrieve. To retrieve a secret from another + // account, you must use an ARN. + // + // For an ARN, we recommend that you specify a complete ARN rather than a partial + // ARN. See [Finding a secret from a partial ARN]. + // + // [Finding a secret from a partial ARN]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot.html#ARN_secretnamehyphen // // This member is required. SecretId *string @@ -57,15 +68,18 @@ type GetSecretValueInput struct { // The unique identifier of the version of the secret to retrieve. If you include // both this parameter and VersionStage , the two parameters must refer to the same // secret version. If you don't specify either a VersionStage or VersionId , then - // Secrets Manager returns the AWSCURRENT version. This value is typically a - // UUID-type (https://wikipedia.org/wiki/Universally_unique_identifier) value with - // 32 hexadecimal digits. + // Secrets Manager returns the AWSCURRENT version. + // + // This value is typically a [UUID-type] value with 32 hexadecimal digits. + // + // [UUID-type]: https://wikipedia.org/wiki/Universally_unique_identifier VersionId *string - // The staging label of the version of the secret to retrieve. Secrets Manager - // uses staging labels to keep track of different versions during the rotation - // process. If you include both this parameter and VersionId , the two parameters - // must refer to the same secret version. If you don't specify either a + // The staging label of the version of the secret to retrieve. + // + // Secrets Manager uses staging labels to keep track of different versions during + // the rotation process. If you include both this parameter and VersionId , the two + // parameters must refer to the same secret version. If you don't specify either a // VersionStage or VersionId , Secrets Manager returns the AWSCURRENT version. VersionStage *string @@ -88,16 +102,26 @@ type GetSecretValueOutput struct { // The decrypted secret value, if the secret value was originally provided as // binary data in the form of a byte array. When you retrieve a SecretBinary using // the HTTP API, the Python SDK, or the Amazon Web Services CLI, the value is - // Base64-encoded. Otherwise, it is not encoded. If the secret was created by using - // the Secrets Manager console, or if the secret value was originally provided as a - // string, then this field is omitted. The secret value appears in SecretString - // instead. + // Base64-encoded. Otherwise, it is not encoded. + // + // If the secret was created by using the Secrets Manager console, or if the + // secret value was originally provided as a string, then this field is omitted. + // The secret value appears in SecretString instead. + // + // Sensitive: This field contains sensitive information, so the service does not + // include it in CloudTrail log entries. If you create your own log entries, you + // must also avoid logging the information in this field. SecretBinary []byte // The decrypted secret value, if the secret value was originally provided as a - // string or through the Secrets Manager console. If this secret was created by - // using the console, then Secrets Manager stores the information as a JSON - // structure of key/value pairs. 
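// Illustrative sketch of GetSecretValue and the SecretString/SecretBinary split
// described above. Assumes the same client and imports as the earlier sketches;
// VersionStage defaults to AWSCURRENT when omitted.
func fetchSecret(ctx context.Context, client *secretsmanager.Client, name string) ([]byte, error) {
	out, err := client.GetSecretValue(ctx, &secretsmanager.GetSecretValueInput{
		SecretId:     aws.String(name),
		VersionStage: aws.String("AWSCURRENT"), // use "AWSPREVIOUS" for the prior version
	})
	if err != nil {
		return nil, err
	}
	// String secrets (including console-created ones) arrive in SecretString; binary
	// secrets arrive in SecretBinary. Only one of the two is populated.
	if out.SecretString != nil {
		return []byte(*out.SecretString), nil
	}
	return out.SecretBinary, nil
}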
+ // string or through the Secrets Manager console. + // + // If this secret was created by using the console, then Secrets Manager stores + // the information as a JSON structure of key/value pairs. + // + // Sensitive: This field contains sensitive information, so the service does not + // include it in CloudTrail log entries. If you create your own log entries, you + // must also avoid logging the information in this field. SecretString *string // The unique identifier of this version of the secret. @@ -168,6 +192,12 @@ func (c *Client) addOperationGetSecretValueMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpGetSecretValueValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_ListSecretVersionIds.go b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_ListSecretVersionIds.go index 53ff4c41456..2a2ed721095 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_ListSecretVersionIds.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_ListSecretVersionIds.go @@ -12,16 +12,21 @@ import ( ) // Lists the versions of a secret. Secrets Manager uses staging labels to indicate -// the different versions of a secret. For more information, see Secrets Manager -// concepts: Versions (https://docs.aws.amazon.com/secretsmanager/latest/userguide/getting-started.html#term_version) -// . To list the secrets in the account, use ListSecrets . Secrets Manager -// generates a CloudTrail log entry when you call this action. Do not include -// sensitive information in request parameters because it might be logged. For more -// information, see Logging Secrets Manager events with CloudTrail (https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html) -// . Required permissions: secretsmanager:ListSecretVersionIds . For more -// information, see IAM policy actions for Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions) -// and Authentication and access control in Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html) -// . +// the different versions of a secret. For more information, see [Secrets Manager concepts: Versions]. +// +// To list the secrets in the account, use ListSecrets. +// +// Secrets Manager generates a CloudTrail log entry when you call this action. Do +// not include sensitive information in request parameters because it might be +// logged. For more information, see [Logging Secrets Manager events with CloudTrail]. +// +// Required permissions: secretsmanager:ListSecretVersionIds . For more +// information, see [IAM policy actions for Secrets Manager]and [Authentication and access control in Secrets Manager]. 
+// +// [Authentication and access control in Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html +// [Logging Secrets Manager events with CloudTrail]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html +// [Secrets Manager concepts: Versions]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/getting-started.html#term_version +// [IAM policy actions for Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions func (c *Client) ListSecretVersionIds(ctx context.Context, params *ListSecretVersionIdsInput, optFns ...func(*Options)) (*ListSecretVersionIdsOutput, error) { if params == nil { params = &ListSecretVersionIdsInput{} @@ -39,10 +44,12 @@ func (c *Client) ListSecretVersionIds(ctx context.Context, params *ListSecretVer type ListSecretVersionIdsInput struct { - // The ARN or name of the secret whose versions you want to list. For an ARN, we - // recommend that you specify a complete ARN rather than a partial ARN. See - // Finding a secret from a partial ARN (https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot.html#ARN_secretnamehyphen) - // . + // The ARN or name of the secret whose versions you want to list. + // + // For an ARN, we recommend that you specify a complete ARN rather than a partial + // ARN. See [Finding a secret from a partial ARN]. + // + // [Finding a secret from a partial ARN]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot.html#ARN_secretnamehyphen // // This member is required. SecretId *string @@ -53,9 +60,11 @@ type ListSecretVersionIdsInput struct { // without staging labels aren't included. IncludeDeprecated *bool - // The number of results to include in the response. If there are more results - // available, in the response, Secrets Manager includes NextToken . To get the next - // results, call ListSecretVersionIds again with the value from NextToken . + // The number of results to include in the response. + // + // If there are more results available, in the response, Secrets Manager includes + // NextToken . To get the next results, call ListSecretVersionIds again with the + // value from NextToken . MaxResults *int32 // A token that indicates where the output should continue from, if a previous @@ -144,6 +153,12 @@ func (c *Client) addOperationListSecretVersionIdsMiddlewares(stack *middleware.S if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpListSecretVersionIdsValidationMiddleware(stack); err != nil { return err } @@ -168,20 +183,14 @@ func (c *Client) addOperationListSecretVersionIdsMiddlewares(stack *middleware.S return nil } -// ListSecretVersionIdsAPIClient is a client that implements the -// ListSecretVersionIds operation. -type ListSecretVersionIdsAPIClient interface { - ListSecretVersionIds(context.Context, *ListSecretVersionIdsInput, ...func(*Options)) (*ListSecretVersionIdsOutput, error) -} - -var _ ListSecretVersionIdsAPIClient = (*Client)(nil) - // ListSecretVersionIdsPaginatorOptions is the paginator options for // ListSecretVersionIds type ListSecretVersionIdsPaginatorOptions struct { - // The number of results to include in the response. 
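// Illustrative sketch of ListSecretVersionIds with manual MaxResults/NextToken paging
// as described above. The Versions field and its VersionId/VersionStages attributes
// are assumed from the service model; same client and imports as the earlier sketches.
func listVersionIDs(ctx context.Context, client *secretsmanager.Client, name string) error {
	var token *string
	for {
		out, err := client.ListSecretVersionIds(ctx, &secretsmanager.ListSecretVersionIdsInput{
			SecretId:          aws.String(name),
			MaxResults:        aws.Int32(25),
			NextToken:         token,
			IncludeDeprecated: aws.Bool(false), // skip versions without staging labels
		})
		if err != nil {
			return err
		}
		for _, v := range out.Versions {
			log.Printf("version %s stages %v", aws.ToString(v.VersionId), v.VersionStages)
		}
		if out.NextToken == nil {
			return nil
		}
		token = out.NextToken
	}
}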
If there are more results - // available, in the response, Secrets Manager includes NextToken . To get the next - // results, call ListSecretVersionIds again with the value from NextToken . + // The number of results to include in the response. + // + // If there are more results available, in the response, Secrets Manager includes + // NextToken . To get the next results, call ListSecretVersionIds again with the + // value from NextToken . Limit int32 // Set to true if pagination should stop if the service returns a pagination token @@ -242,6 +251,9 @@ func (p *ListSecretVersionIdsPaginator) NextPage(ctx context.Context, optFns ... } params.MaxResults = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListSecretVersionIds(ctx, ¶ms, optFns...) if err != nil { return nil, err @@ -261,6 +273,14 @@ func (p *ListSecretVersionIdsPaginator) NextPage(ctx context.Context, optFns ... return result, nil } +// ListSecretVersionIdsAPIClient is a client that implements the +// ListSecretVersionIds operation. +type ListSecretVersionIdsAPIClient interface { + ListSecretVersionIds(context.Context, *ListSecretVersionIdsInput, ...func(*Options)) (*ListSecretVersionIdsOutput, error) +} + +var _ ListSecretVersionIdsAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListSecretVersionIds(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_ListSecrets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_ListSecrets.go index 318cda1be32..e1fdc7c0025 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_ListSecrets.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_ListSecrets.go @@ -13,20 +13,29 @@ import ( // Lists the secrets that are stored by Secrets Manager in the Amazon Web Services // account, not including secrets that are marked for deletion. To see secrets -// marked for deletion, use the Secrets Manager console. All Secrets Manager -// operations are eventually consistent. ListSecrets might not reflect changes from -// the last five minutes. You can get more recent information for a specific secret -// by calling DescribeSecret . To list the versions of a secret, use -// ListSecretVersionIds . To retrieve the values for the secrets, call -// BatchGetSecretValue or GetSecretValue . For information about finding secrets in -// the console, see Find secrets in Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/manage_search-secret.html) -// . Secrets Manager generates a CloudTrail log entry when you call this action. Do +// marked for deletion, use the Secrets Manager console. +// +// All Secrets Manager operations are eventually consistent. ListSecrets might not +// reflect changes from the last five minutes. You can get more recent information +// for a specific secret by calling DescribeSecret. +// +// To list the versions of a secret, use ListSecretVersionIds. +// +// To retrieve the values for the secrets, call BatchGetSecretValue or GetSecretValue. +// +// For information about finding secrets in the console, see [Find secrets in Secrets Manager]. +// +// Secrets Manager generates a CloudTrail log entry when you call this action. Do // not include sensitive information in request parameters because it might be -// logged. 
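// Illustrative sketch of the same listing via the ListSecretVersionIdsPaginator shown
// in this hunk, which handles NextToken (and the paginator user agent added above)
// automatically. Same client and imports as the earlier sketches.
func listVersionIDsPaginated(ctx context.Context, client *secretsmanager.Client, name string) error {
	p := secretsmanager.NewListSecretVersionIdsPaginator(client, &secretsmanager.ListSecretVersionIdsInput{
		SecretId: aws.String(name),
	}, func(o *secretsmanager.ListSecretVersionIdsPaginatorOptions) {
		o.Limit = 25 // becomes MaxResults on each request
	})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, v := range page.Versions {
			log.Printf("version %s stages %v", aws.ToString(v.VersionId), v.VersionStages)
		}
	}
	return nil
}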
For more information, see Logging Secrets Manager events with CloudTrail (https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html) -// . Required permissions: secretsmanager:ListSecrets . For more information, see -// IAM policy actions for Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions) -// and Authentication and access control in Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html) -// . +// logged. For more information, see [Logging Secrets Manager events with CloudTrail]. +// +// Required permissions: secretsmanager:ListSecrets . For more information, see [IAM policy actions for Secrets Manager] +// and [Authentication and access control in Secrets Manager]. +// +// [Authentication and access control in Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html +// [Logging Secrets Manager events with CloudTrail]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html +// [IAM policy actions for Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions +// [Find secrets in Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/manage_search-secret.html func (c *Client) ListSecrets(ctx context.Context, params *ListSecretsInput, optFns ...func(*Options)) (*ListSecretsOutput, error) { if params == nil { params = &ListSecretsInput{} @@ -51,9 +60,11 @@ type ListSecretsInput struct { // secrets scheduled for deletion aren't included. IncludePlannedDeletion *bool - // The number of results to include in the response. If there are more results - // available, in the response, Secrets Manager includes NextToken . To get the next - // results, call ListSecrets again with the value from NextToken . + // The number of results to include in the response. + // + // If there are more results available, in the response, Secrets Manager includes + // NextToken . To get the next results, call ListSecrets again with the value from + // NextToken . MaxResults *int32 // A token that indicates where the output should continue from, if a previous @@ -139,6 +150,12 @@ func (c *Client) addOperationListSecretsMiddlewares(stack *middleware.Stack, opt if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListSecrets(options.Region), middleware.Before); err != nil { return err } @@ -160,18 +177,13 @@ func (c *Client) addOperationListSecretsMiddlewares(stack *middleware.Stack, opt return nil } -// ListSecretsAPIClient is a client that implements the ListSecrets operation. -type ListSecretsAPIClient interface { - ListSecrets(context.Context, *ListSecretsInput, ...func(*Options)) (*ListSecretsOutput, error) -} - -var _ ListSecretsAPIClient = (*Client)(nil) - // ListSecretsPaginatorOptions is the paginator options for ListSecrets type ListSecretsPaginatorOptions struct { - // The number of results to include in the response. If there are more results - // available, in the response, Secrets Manager includes NextToken . To get the next - // results, call ListSecrets again with the value from NextToken . 
+ // The number of results to include in the response. + // + // If there are more results available, in the response, Secrets Manager includes + // NextToken . To get the next results, call ListSecrets again with the value from + // NextToken . Limit int32 // Set to true if pagination should stop if the service returns a pagination token @@ -232,6 +244,9 @@ func (p *ListSecretsPaginator) NextPage(ctx context.Context, optFns ...func(*Opt } params.MaxResults = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListSecrets(ctx, ¶ms, optFns...) if err != nil { return nil, err @@ -251,6 +266,13 @@ func (p *ListSecretsPaginator) NextPage(ctx context.Context, optFns ...func(*Opt return result, nil } +// ListSecretsAPIClient is a client that implements the ListSecrets operation. +type ListSecretsAPIClient interface { + ListSecrets(context.Context, *ListSecretsInput, ...func(*Options)) (*ListSecretsOutput, error) +} + +var _ ListSecretsAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListSecrets(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_PutResourcePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_PutResourcePolicy.go index 37e9b5543bb..f778ffb6230 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_PutResourcePolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_PutResourcePolicy.go @@ -11,17 +11,22 @@ import ( ) // Attaches a resource-based permission policy to a secret. A resource-based -// policy is optional. For more information, see Authentication and access control -// for Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html) -// For information about attaching a policy in the console, see Attach a -// permissions policy to a secret (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_resource-based-policies.html) -// . Secrets Manager generates a CloudTrail log entry when you call this action. Do +// policy is optional. For more information, see [Authentication and access control for Secrets Manager] +// +// For information about attaching a policy in the console, see [Attach a permissions policy to a secret]. +// +// Secrets Manager generates a CloudTrail log entry when you call this action. Do // not include sensitive information in request parameters because it might be -// logged. For more information, see Logging Secrets Manager events with CloudTrail (https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html) -// . Required permissions: secretsmanager:PutResourcePolicy . For more information, -// see IAM policy actions for Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions) -// and Authentication and access control in Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html) -// . +// logged. For more information, see [Logging Secrets Manager events with CloudTrail]. +// +// Required permissions: secretsmanager:PutResourcePolicy . For more information, +// see [IAM policy actions for Secrets Manager]and [Authentication and access control in Secrets Manager]. 
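// Illustrative sketch of ListSecrets as documented above, including secrets scheduled
// for deletion and using the generated paginator. The SecretList field and its Name
// attribute are assumed from the service model; same client and imports as the
// earlier sketches.
func listSecretNames(ctx context.Context, client *secretsmanager.Client) error {
	p := secretsmanager.NewListSecretsPaginator(client, &secretsmanager.ListSecretsInput{
		IncludePlannedDeletion: aws.Bool(true),
		MaxResults:             aws.Int32(50),
	})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, s := range page.SecretList {
			log.Printf("secret: %s", aws.ToString(s.Name))
		}
	}
	return nil
}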
+// +// [Authentication and access control in Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html +// [Logging Secrets Manager events with CloudTrail]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html +// [Attach a permissions policy to a secret]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_resource-based-policies.html +// [IAM policy actions for Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions +// [Authentication and access control for Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html func (c *Client) PutResourcePolicy(ctx context.Context, params *PutResourcePolicyInput, optFns ...func(*Options)) (*PutResourcePolicyOutput, error) { if params == nil { params = &PutResourcePolicyInput{} @@ -40,34 +45,42 @@ func (c *Client) PutResourcePolicy(ctx context.Context, params *PutResourcePolic type PutResourcePolicyInput struct { // A JSON-formatted string for an Amazon Web Services resource-based policy. For - // example policies, see Permissions policy examples (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_examples.html) - // . + // example policies, see [Permissions policy examples]. + // + // [Permissions policy examples]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_examples.html // // This member is required. ResourcePolicy *string - // The ARN or name of the secret to attach the resource-based policy. For an ARN, - // we recommend that you specify a complete ARN rather than a partial ARN. See - // Finding a secret from a partial ARN (https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot.html#ARN_secretnamehyphen) - // . + // The ARN or name of the secret to attach the resource-based policy. + // + // For an ARN, we recommend that you specify a complete ARN rather than a partial + // ARN. See [Finding a secret from a partial ARN]. + // + // [Finding a secret from a partial ARN]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot.html#ARN_secretnamehyphen // // This member is required. SecretId *string // Specifies whether to block resource-based policies that allow broad access to // the secret, for example those that use a wildcard for the principal. By default, - // public policies aren't blocked. Resource policy validation and the - // BlockPublicPolicy parameter help protect your resources by preventing public - // access from being granted through the resource policies that are directly - // attached to your secrets. In addition to using these features, carefully inspect - // the following policies to confirm that they do not grant public access: + // public policies aren't blocked. + // + // Resource policy validation and the BlockPublicPolicy parameter help protect + // your resources by preventing public access from being granted through the + // resource policies that are directly attached to your secrets. 
In addition to + // using these features, carefully inspect the following policies to confirm that + // they do not grant public access: + // // - Identity-based policies attached to associated Amazon Web Services // principals (for example, IAM roles) + // // - Resource-based policies attached to associated Amazon Web Services // resources (for example, Key Management Service (KMS) keys) - // To review permissions to your secrets, see Determine who has permissions to - // your secrets (https://docs.aws.amazon.com/secretsmanager/latest/userguide/determine-acccess_examine-iam-policies.html) - // . + // + // To review permissions to your secrets, see [Determine who has permissions to your secrets]. + // + // [Determine who has permissions to your secrets]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/determine-acccess_examine-iam-policies.html BlockPublicPolicy *bool noSmithyDocumentSerde @@ -142,6 +155,12 @@ func (c *Client) addOperationPutResourcePolicyMiddlewares(stack *middleware.Stac if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpPutResourcePolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_PutSecretValue.go b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_PutSecretValue.go index 0304d3bbbbc..53b4fde8890 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_PutSecretValue.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_PutSecretValue.go @@ -12,32 +12,47 @@ import ( // Creates a new version with a new encrypted secret value and attaches it to the // secret. The version can contain a new SecretString value or a new SecretBinary -// value. We recommend you avoid calling PutSecretValue at a sustained rate of -// more than once every 10 minutes. When you update the secret value, Secrets -// Manager creates a new version of the secret. Secrets Manager removes outdated -// versions when there are more than 100, but it does not remove versions created -// less than 24 hours ago. If you call PutSecretValue more than once every 10 -// minutes, you create more versions than Secrets Manager removes, and you will -// reach the quota for secret versions. You can specify the staging labels to -// attach to the new version in VersionStages . If you don't include VersionStages -// , then Secrets Manager automatically moves the staging label AWSCURRENT to this -// version. If this operation creates the first version for the secret, then -// Secrets Manager automatically attaches the staging label AWSCURRENT to it. If -// this operation moves the staging label AWSCURRENT from another version to this -// version, then Secrets Manager also automatically moves the staging label -// AWSPREVIOUS to the version that AWSCURRENT was removed from. This operation is -// idempotent. If you call this operation with a ClientRequestToken that matches -// an existing version's VersionId, and you specify the same secret data, the -// operation succeeds but does nothing. However, if the secret data is different, -// then the operation fails because you can't modify an existing version; you can -// only create new ones. Secrets Manager generates a CloudTrail log entry when you -// call this action. 
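// Illustrative sketch of PutResourcePolicy with BlockPublicPolicy, as described above.
// The account ID, role name, and policy document are placeholders for illustration
// only; same client and imports as the earlier sketches.
func attachPolicy(ctx context.Context, client *secretsmanager.Client, name string) error {
	policy := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Effect": "Allow",
	    "Principal": {"AWS": "arn:aws:iam::111122223333:role/ExampleReaderRole"},
	    "Action": "secretsmanager:GetSecretValue",
	    "Resource": "*"
	  }]
	}`
	_, err := client.PutResourcePolicy(ctx, &secretsmanager.PutResourcePolicyInput{
		SecretId:          aws.String(name),
		ResourcePolicy:    aws.String(policy),
		BlockPublicPolicy: aws.Bool(true), // reject policies that grant broad or public access
	})
	return err
}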
Do not include sensitive information in request parameters -// except SecretBinary or SecretString because it might be logged. For more -// information, see Logging Secrets Manager events with CloudTrail (https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html) -// . Required permissions: secretsmanager:PutSecretValue . For more information, -// see IAM policy actions for Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions) -// and Authentication and access control in Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html) -// . +// value. +// +// We recommend you avoid calling PutSecretValue at a sustained rate of more than +// once every 10 minutes. When you update the secret value, Secrets Manager creates +// a new version of the secret. Secrets Manager removes outdated versions when +// there are more than 100, but it does not remove versions created less than 24 +// hours ago. If you call PutSecretValue more than once every 10 minutes, you +// create more versions than Secrets Manager removes, and you will reach the quota +// for secret versions. +// +// You can specify the staging labels to attach to the new version in VersionStages +// . If you don't include VersionStages , then Secrets Manager automatically moves +// the staging label AWSCURRENT to this version. If this operation creates the +// first version for the secret, then Secrets Manager automatically attaches the +// staging label AWSCURRENT to it. If this operation moves the staging label +// AWSCURRENT from another version to this version, then Secrets Manager also +// automatically moves the staging label AWSPREVIOUS to the version that AWSCURRENT +// was removed from. +// +// This operation is idempotent. If you call this operation with a +// ClientRequestToken that matches an existing version's VersionId, and you specify +// the same secret data, the operation succeeds but does nothing. However, if the +// secret data is different, then the operation fails because you can't modify an +// existing version; you can only create new ones. +// +// Secrets Manager generates a CloudTrail log entry when you call this action. Do +// not include sensitive information in request parameters except SecretBinary , +// SecretString , or RotationToken because it might be logged. For more +// information, see [Logging Secrets Manager events with CloudTrail]. +// +// Required permissions: secretsmanager:PutSecretValue . For more information, see [IAM policy actions for Secrets Manager] +// and [Authentication and access control in Secrets Manager]. +// +// When you enter commands in a command shell, there is a risk of the command +// history being accessed or utilities having access to your command parameters. +// This is a concern if the command includes the value of a secret. Learn how to [Mitigate the risks of using command-line tools to store Secrets Manager secrets]. 
+// +// [Authentication and access control in Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html +// [Logging Secrets Manager events with CloudTrail]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html +// [Mitigate the risks of using command-line tools to store Secrets Manager secrets]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/security_cli-exposure-risks.html +// [IAM policy actions for Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions func (c *Client) PutSecretValue(ctx context.Context, params *PutSecretValueInput, optFns ...func(*Options)) (*PutSecretValueOutput, error) { if params == nil { params = &PutSecretValueInput{} @@ -55,57 +70,100 @@ func (c *Client) PutSecretValue(ctx context.Context, params *PutSecretValueInput type PutSecretValueInput struct { - // The ARN or name of the secret to add a new version to. For an ARN, we recommend - // that you specify a complete ARN rather than a partial ARN. See Finding a secret - // from a partial ARN (https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot.html#ARN_secretnamehyphen) - // . If the secret doesn't already exist, use CreateSecret instead. + // The ARN or name of the secret to add a new version to. + // + // For an ARN, we recommend that you specify a complete ARN rather than a partial + // ARN. See [Finding a secret from a partial ARN]. + // + // If the secret doesn't already exist, use CreateSecret instead. + // + // [Finding a secret from a partial ARN]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot.html#ARN_secretnamehyphen // // This member is required. SecretId *string - // A unique identifier for the new version of the secret. If you use the Amazon - // Web Services CLI or one of the Amazon Web Services SDKs to call this operation, - // then you can leave this parameter empty. The CLI or SDK generates a random UUID - // for you and includes it as the value for this parameter in the request. If you - // generate a raw HTTP request to the Secrets Manager service endpoint, then you - // must generate a ClientRequestToken and include it in the request. This value - // helps ensure idempotency. Secrets Manager uses this value to prevent the - // accidental creation of duplicate versions if there are failures and retries - // during a rotation. We recommend that you generate a UUID-type (https://wikipedia.org/wiki/Universally_unique_identifier) - // value to ensure uniqueness of your versions within the specified secret. + // A unique identifier for the new version of the secret. + // + // If you use the Amazon Web Services CLI or one of the Amazon Web Services SDKs + // to call this operation, then you can leave this parameter empty. The CLI or SDK + // generates a random UUID for you and includes it as the value for this parameter + // in the request. + // + // If you generate a raw HTTP request to the Secrets Manager service endpoint, + // then you must generate a ClientRequestToken and include it in the request. + // + // This value helps ensure idempotency. Secrets Manager uses this value to prevent + // the accidental creation of duplicate versions if there are failures and retries + // during a rotation. We recommend that you generate a [UUID-type]value to ensure uniqueness + // of your versions within the specified secret. 
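// Illustrative sketch of PutSecretValue with an explicit ClientRequestToken for
// idempotency, as described above. The github.com/google/uuid helper is an assumption
// (any UUID generator works, and the SDK can generate the token for you); the JSON
// value and secret name are placeholders; same client and imports as the earlier
// sketches.
func putNewVersion(ctx context.Context, client *secretsmanager.Client, name string) error {
	out, err := client.PutSecretValue(ctx, &secretsmanager.PutSecretValueInput{
		SecretId:           aws.String(name),
		ClientRequestToken: aws.String(uuid.NewString()), // idempotency token
		SecretString:       aws.String(`{"username":"example-user","password":"example-pass"}`),
		// VersionStages omitted: Secrets Manager moves AWSCURRENT to this new version.
	})
	if err != nil {
		return err
	}
	log.Printf("created version %s", aws.ToString(out.VersionId))
	return nil
}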
+ // // - If the ClientRequestToken value isn't already associated with a version of // the secret then a new version of the secret is created. + // // - If a version with this value already exists and that version's SecretString // or SecretBinary values are the same as those in the request then the request // is ignored. The operation is idempotent. + // // - If a version with this value already exists and the version of the // SecretString and SecretBinary values are different from those in the request, // then the request fails because you can't modify a secret version. You can only // create new versions to store new secret values. + // // This value becomes the VersionId of the new version. + // + // [UUID-type]: https://wikipedia.org/wiki/Universally_unique_identifier ClientRequestToken *string + // A unique identifier that indicates the source of the request. For cross-account + // rotation (when you rotate a secret in one account by using a Lambda rotation + // function in another account) and the Lambda rotation function assumes an IAM + // role to call Secrets Manager, Secrets Manager validates the identity with the + // rotation token. For more information, see [How rotation works]. + // + // Sensitive: This field contains sensitive information, so the service does not + // include it in CloudTrail log entries. If you create your own log entries, you + // must also avoid logging the information in this field. + // + // [How rotation works]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotating-secrets.html + RotationToken *string + // The binary data to encrypt and store in the new version of the secret. To use // this parameter in the command-line tools, we recommend that you store your - // binary data in a file and then pass the contents of the file as a parameter. You - // must include SecretBinary or SecretString , but not both. You can't access this - // value from the Secrets Manager console. + // binary data in a file and then pass the contents of the file as a parameter. + // + // You must include SecretBinary or SecretString , but not both. + // + // You can't access this value from the Secrets Manager console. + // + // Sensitive: This field contains sensitive information, so the service does not + // include it in CloudTrail log entries. If you create your own log entries, you + // must also avoid logging the information in this field. SecretBinary []byte - // The text to encrypt and store in the new version of the secret. You must - // include SecretBinary or SecretString , but not both. We recommend you create the - // secret string as JSON key/value pairs, as shown in the example. + // The text to encrypt and store in the new version of the secret. + // + // You must include SecretBinary or SecretString , but not both. + // + // We recommend you create the secret string as JSON key/value pairs, as shown in + // the example. + // + // Sensitive: This field contains sensitive information, so the service does not + // include it in CloudTrail log entries. If you create your own log entries, you + // must also avoid logging the information in this field. SecretString *string // A list of staging labels to attach to this version of the secret. Secrets // Manager uses staging labels to track versions of a secret through the rotation - // process. If you specify a staging label that's already associated with a - // different version of the same secret, then Secrets Manager removes the label - // from the other version and attaches it to this version. 
If you specify - // AWSCURRENT , and it is already attached to another version, then Secrets Manager - // also moves the staging label AWSPREVIOUS to the version that AWSCURRENT was - // removed from. If you don't include VersionStages , then Secrets Manager - // automatically moves the staging label AWSCURRENT to this version. + // process. + // + // If you specify a staging label that's already associated with a different + // version of the same secret, then Secrets Manager removes the label from the + // other version and attaches it to this version. If you specify AWSCURRENT , and + // it is already attached to another version, then Secrets Manager also moves the + // staging label AWSPREVIOUS to the version that AWSCURRENT was removed from. + // + // If you don't include VersionStages , then Secrets Manager automatically moves + // the staging label AWSCURRENT to this version. VersionStages []string noSmithyDocumentSerde @@ -188,6 +246,12 @@ func (c *Client) addOperationPutSecretValueMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opPutSecretValueMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_RemoveRegionsFromReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_RemoveRegionsFromReplication.go index 404d3e4a45c..53784bd3278 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_RemoveRegionsFromReplication.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_RemoveRegionsFromReplication.go @@ -12,14 +12,18 @@ import ( ) // For a secret that is replicated to other Regions, deletes the secret replicas -// from the Regions you specify. Secrets Manager generates a CloudTrail log entry -// when you call this action. Do not include sensitive information in request -// parameters because it might be logged. For more information, see Logging -// Secrets Manager events with CloudTrail (https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html) -// . Required permissions: secretsmanager:RemoveRegionsFromReplication . For more -// information, see IAM policy actions for Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions) -// and Authentication and access control in Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html) -// . +// from the Regions you specify. +// +// Secrets Manager generates a CloudTrail log entry when you call this action. Do +// not include sensitive information in request parameters because it might be +// logged. For more information, see [Logging Secrets Manager events with CloudTrail]. +// +// Required permissions: secretsmanager:RemoveRegionsFromReplication . For more +// information, see [IAM policy actions for Secrets Manager]and [Authentication and access control in Secrets Manager]. 
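// Illustrative sketch of RemoveRegionsFromReplication as documented above. The
// RemoveReplicaRegions field name is assumed from the service model; the Region value
// and secret name are placeholders; same client and imports as the earlier sketches.
func dropReplica(ctx context.Context, client *secretsmanager.Client, name string) error {
	_, err := client.RemoveRegionsFromReplication(ctx, &secretsmanager.RemoveRegionsFromReplicationInput{
		SecretId:             aws.String(name),
		RemoveReplicaRegions: []string{"eu-west-1"},
	})
	return err
}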
+// +// [Authentication and access control in Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html +// [Logging Secrets Manager events with CloudTrail]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html +// [IAM policy actions for Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions func (c *Client) RemoveRegionsFromReplication(ctx context.Context, params *RemoveRegionsFromReplicationInput, optFns ...func(*Options)) (*RemoveRegionsFromReplicationOutput, error) { if params == nil { params = &RemoveRegionsFromReplicationInput{} @@ -119,6 +123,12 @@ func (c *Client) addOperationRemoveRegionsFromReplicationMiddlewares(stack *midd if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpRemoveRegionsFromReplicationValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_ReplicateSecretToRegions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_ReplicateSecretToRegions.go index 896017c39f8..25ea4da585f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_ReplicateSecretToRegions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_ReplicateSecretToRegions.go @@ -11,17 +11,22 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Replicates the secret to a new Regions. See Multi-Region secrets (https://docs.aws.amazon.com/secretsmanager/latest/userguide/create-manage-multi-region-secrets.html) -// . Secrets Manager generates a CloudTrail log entry when you call this action. Do +// Replicates the secret to a new Regions. See [Multi-Region secrets]. +// +// Secrets Manager generates a CloudTrail log entry when you call this action. Do // not include sensitive information in request parameters because it might be -// logged. For more information, see Logging Secrets Manager events with CloudTrail (https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html) -// . Required permissions: secretsmanager:ReplicateSecretToRegions . If the primary +// logged. For more information, see [Logging Secrets Manager events with CloudTrail]. +// +// Required permissions: secretsmanager:ReplicateSecretToRegions . If the primary // secret is encrypted with a KMS key other than aws/secretsmanager , you also need // kms:Decrypt permission to the key. To encrypt the replicated secret with a KMS // key other than aws/secretsmanager , you need kms:GenerateDataKey and kms:Encrypt -// to the key. For more information, see IAM policy actions for Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions) -// and Authentication and access control in Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html) -// . +// to the key. For more information, see [IAM policy actions for Secrets Manager]and [Authentication and access control in Secrets Manager]. 
+// +// [Authentication and access control in Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html +// [Logging Secrets Manager events with CloudTrail]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html +// [Multi-Region secrets]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/create-manage-multi-region-secrets.html +// [IAM policy actions for Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions func (c *Client) ReplicateSecretToRegions(ctx context.Context, params *ReplicateSecretToRegionsInput, optFns ...func(*Options)) (*ReplicateSecretToRegionsOutput, error) { if params == nil { params = &ReplicateSecretToRegionsInput{} @@ -125,6 +130,12 @@ func (c *Client) addOperationReplicateSecretToRegionsMiddlewares(stack *middlewa if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpReplicateSecretToRegionsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_RestoreSecret.go b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_RestoreSecret.go index db3b6622f03..f89d8dab3ca 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_RestoreSecret.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_RestoreSecret.go @@ -11,14 +11,18 @@ import ( ) // Cancels the scheduled deletion of a secret by removing the DeletedDate time -// stamp. You can access a secret again after it has been restored. Secrets Manager -// generates a CloudTrail log entry when you call this action. Do not include -// sensitive information in request parameters because it might be logged. For more -// information, see Logging Secrets Manager events with CloudTrail (https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html) -// . Required permissions: secretsmanager:RestoreSecret . For more information, see -// IAM policy actions for Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions) -// and Authentication and access control in Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html) -// . +// stamp. You can access a secret again after it has been restored. +// +// Secrets Manager generates a CloudTrail log entry when you call this action. Do +// not include sensitive information in request parameters because it might be +// logged. For more information, see [Logging Secrets Manager events with CloudTrail]. +// +// Required permissions: secretsmanager:RestoreSecret . For more information, see [IAM policy actions for Secrets Manager] +// and [Authentication and access control in Secrets Manager]. 
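To make the replication operations above concrete, here is a hedged sketch (not part of the vendored code) that adds a replica Region and later removes another one. The secret ID and Region names are placeholders, and the field names follow this SDK version's generated ReplicateSecretToRegionsInput and RemoveRegionsFromReplicationInput types.

package replicationexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/secretsmanager"
	"github.com/aws/aws-sdk-go-v2/service/secretsmanager/types"
)

// replicateAndPrune copies a secret into eu-west-1 and then removes a
// replica that is no longer needed. Region names and the secret ID are
// placeholders.
func replicateAndPrune(ctx context.Context, client *secretsmanager.Client) error {
	// Needs secretsmanager:ReplicateSecretToRegions plus the KMS permissions
	// described in the operation documentation above.
	_, err := client.ReplicateSecretToRegions(ctx, &secretsmanager.ReplicateSecretToRegionsInput{
		SecretId: aws.String("my-app/db-credentials"),
		AddReplicaRegions: []types.ReplicaRegionType{
			{Region: aws.String("eu-west-1")}, // KmsKeyId defaults to aws/secretsmanager
		},
	})
	if err != nil {
		return err
	}

	// Needs secretsmanager:RemoveRegionsFromReplication.
	_, err = client.RemoveRegionsFromReplication(ctx, &secretsmanager.RemoveRegionsFromReplicationInput{
		SecretId:             aws.String("my-app/db-credentials"),
		RemoveReplicaRegions: []string{"ap-southeast-2"},
	})
	return err
}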
+// +// [Authentication and access control in Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html +// [Logging Secrets Manager events with CloudTrail]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html +// [IAM policy actions for Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions func (c *Client) RestoreSecret(ctx context.Context, params *RestoreSecretInput, optFns ...func(*Options)) (*RestoreSecretOutput, error) { if params == nil { params = &RestoreSecretInput{} @@ -36,10 +40,12 @@ func (c *Client) RestoreSecret(ctx context.Context, params *RestoreSecretInput, type RestoreSecretInput struct { - // The ARN or name of the secret to restore. For an ARN, we recommend that you - // specify a complete ARN rather than a partial ARN. See Finding a secret from a - // partial ARN (https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot.html#ARN_secretnamehyphen) - // . + // The ARN or name of the secret to restore. + // + // For an ARN, we recommend that you specify a complete ARN rather than a partial + // ARN. See [Finding a secret from a partial ARN]. + // + // [Finding a secret from a partial ARN]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot.html#ARN_secretnamehyphen // // This member is required. SecretId *string @@ -116,6 +122,12 @@ func (c *Client) addOperationRestoreSecretMiddlewares(stack *middleware.Stack, o if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpRestoreSecretValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_RotateSecret.go b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_RotateSecret.go index f5ea3c9b804..c92a727e5ed 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_RotateSecret.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_RotateSecret.go @@ -12,28 +12,35 @@ import ( ) // Configures and starts the asynchronous process of rotating the secret. For -// information about rotation, see Rotate secrets (https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotating-secrets.html) -// in the Secrets Manager User Guide. If you include the configuration parameters, -// the operation sets the values for the secret and then immediately starts a -// rotation. If you don't include the configuration parameters, the operation -// starts a rotation with the values already stored in the secret. When rotation is -// successful, the AWSPENDING staging label might be attached to the same version -// as the AWSCURRENT version, or it might not be attached to any version. If the -// AWSPENDING staging label is present but not attached to the same version as -// AWSCURRENT , then any later invocation of RotateSecret assumes that a previous -// rotation request is still in progress and returns an error. When rotation is -// unsuccessful, the AWSPENDING staging label might be attached to an empty secret -// version. For more information, see Troubleshoot rotation (https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot_rotation.html) -// in the Secrets Manager User Guide. 
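RestoreSecret takes only the secret identifier, as the documentation above notes. A minimal sketch, assuming an already-constructed client; the full ARN shown is a placeholder and a complete ARN is preferred over a partial one.

package restoreexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/secretsmanager"
)

// restore cancels a scheduled deletion by clearing the DeletedDate
// timestamp, as described above. The ARN is a placeholder.
func restore(ctx context.Context, client *secretsmanager.Client) error {
	_, err := client.RestoreSecret(ctx, &secretsmanager.RestoreSecretInput{
		SecretId: aws.String("arn:aws:secretsmanager:us-east-1:123456789012:secret:my-app/db-credentials-AbCdEf"),
	})
	return err
}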
Secrets Manager generates a CloudTrail log -// entry when you call this action. Do not include sensitive information in request -// parameters because it might be logged. For more information, see Logging -// Secrets Manager events with CloudTrail (https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html) -// . Required permissions: secretsmanager:RotateSecret . For more information, see -// IAM policy actions for Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions) -// and Authentication and access control in Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html) -// . You also need lambda:InvokeFunction permissions on the rotation function. For -// more information, see Permissions for rotation (https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotating-secrets-required-permissions-function.html) -// . +// information about rotation, see [Rotate secrets]in the Secrets Manager User Guide. If you +// include the configuration parameters, the operation sets the values for the +// secret and then immediately starts a rotation. If you don't include the +// configuration parameters, the operation starts a rotation with the values +// already stored in the secret. +// +// When rotation is successful, the AWSPENDING staging label might be attached to +// the same version as the AWSCURRENT version, or it might not be attached to any +// version. If the AWSPENDING staging label is present but not attached to the +// same version as AWSCURRENT , then any later invocation of RotateSecret assumes +// that a previous rotation request is still in progress and returns an error. When +// rotation is unsuccessful, the AWSPENDING staging label might be attached to an +// empty secret version. For more information, see [Troubleshoot rotation]in the Secrets Manager User +// Guide. +// +// Secrets Manager generates a CloudTrail log entry when you call this action. Do +// not include sensitive information in request parameters because it might be +// logged. For more information, see [Logging Secrets Manager events with CloudTrail]. +// +// Required permissions: secretsmanager:RotateSecret . For more information, see [IAM policy actions for Secrets Manager] +// and [Authentication and access control in Secrets Manager]. You also need lambda:InvokeFunction permissions on the rotation function. +// For more information, see [Permissions for rotation]. 
+// +// [Authentication and access control in Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html +// [Permissions for rotation]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotating-secrets-required-permissions-function.html +// [Rotate secrets]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotating-secrets.html +// [Logging Secrets Manager events with CloudTrail]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html +// [Troubleshoot rotation]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot_rotation.html +// [IAM policy actions for Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions func (c *Client) RotateSecret(ctx context.Context, params *RotateSecretInput, optFns ...func(*Options)) (*RotateSecretOutput, error) { if params == nil { params = &RotateSecretInput{} @@ -51,42 +58,56 @@ func (c *Client) RotateSecret(ctx context.Context, params *RotateSecretInput, op type RotateSecretInput struct { - // The ARN or name of the secret to rotate. For an ARN, we recommend that you - // specify a complete ARN rather than a partial ARN. See Finding a secret from a - // partial ARN (https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot.html#ARN_secretnamehyphen) - // . + // The ARN or name of the secret to rotate. + // + // For an ARN, we recommend that you specify a complete ARN rather than a partial + // ARN. See [Finding a secret from a partial ARN]. + // + // [Finding a secret from a partial ARN]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot.html#ARN_secretnamehyphen // // This member is required. SecretId *string // A unique identifier for the new version of the secret. You only need to specify // this value if you implement your own retry logic and you want to ensure that - // Secrets Manager doesn't attempt to create a secret version twice. If you use the - // Amazon Web Services CLI or one of the Amazon Web Services SDKs to call this - // operation, then you can leave this parameter empty. The CLI or SDK generates a - // random UUID for you and includes it as the value for this parameter in the - // request. If you generate a raw HTTP request to the Secrets Manager service - // endpoint, then you must generate a ClientRequestToken and include it in the - // request. This value helps ensure idempotency. Secrets Manager uses this value to - // prevent the accidental creation of duplicate versions if there are failures and - // retries during a rotation. We recommend that you generate a UUID-type (https://wikipedia.org/wiki/Universally_unique_identifier) - // value to ensure uniqueness of your versions within the specified secret. + // Secrets Manager doesn't attempt to create a secret version twice. + // + // If you use the Amazon Web Services CLI or one of the Amazon Web Services SDKs + // to call this operation, then you can leave this parameter empty. The CLI or SDK + // generates a random UUID for you and includes it as the value for this parameter + // in the request. + // + // If you generate a raw HTTP request to the Secrets Manager service endpoint, + // then you must generate a ClientRequestToken and include it in the request. + // + // This value helps ensure idempotency. 
Secrets Manager uses this value to prevent + // the accidental creation of duplicate versions if there are failures and retries + // during a rotation. We recommend that you generate a [UUID-type]value to ensure uniqueness + // of your versions within the specified secret. + // + // [UUID-type]: https://wikipedia.org/wiki/Universally_unique_identifier ClientRequestToken *string // Specifies whether to rotate the secret immediately or wait until the next - // scheduled rotation window. The rotation schedule is defined in - // RotateSecretRequest$RotationRules . For secrets that use a Lambda rotation - // function to rotate, if you don't immediately rotate the secret, Secrets Manager - // tests the rotation configuration by running the testSecret step (https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotate-secrets_how.html) - // of the Lambda rotation function. The test creates an AWSPENDING version of the - // secret and then removes it. By default, Secrets Manager rotates the secret - // immediately. + // scheduled rotation window. The rotation schedule is defined in RotateSecretRequest$RotationRules. + // + // For secrets that use a Lambda rotation function to rotate, if you don't + // immediately rotate the secret, Secrets Manager tests the rotation configuration + // by running the [testSecret step]testSecret of the Lambda rotation function. The test creates an + // AWSPENDING version of the secret and then removes it. + // + // By default, Secrets Manager rotates the secret immediately. + // + // [testSecret step]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotate-secrets_lambda-functions.html#rotate-secrets_lambda-functions-code RotateImmediately *bool // For secrets that use a Lambda rotation function to rotate, the ARN of the - // Lambda rotation function. For secrets that use managed rotation, omit this - // field. For more information, see Managed rotation (https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotate-secrets_managed.html) - // in the Secrets Manager User Guide. + // Lambda rotation function. + // + // For secrets that use managed rotation, omit this field. For more information, + // see [Managed rotation]in the Secrets Manager User Guide. + // + // [Managed rotation]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotate-secrets_managed.html RotationLambdaARN *string // A structure that defines the rotation configuration for this secret. @@ -167,6 +188,12 @@ func (c *Client) addOperationRotateSecretMiddlewares(stack *middleware.Stack, op if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opRotateSecretMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_StopReplicationToReplica.go b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_StopReplicationToReplica.go index 8f1a14f1ca6..c9d2a45519e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_StopReplicationToReplica.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_StopReplicationToReplica.go @@ -11,16 +11,21 @@ import ( ) // Removes the link between the replica secret and the primary secret and promotes -// the replica to a primary secret in the replica Region. 
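The RotateSecretInput fields above (RotationLambdaARN, RotationRules, RotateImmediately) come together as follows. This is an illustrative sketch rather than the project's code: the Lambda ARN, schedule expression, and secret name are placeholders.

package rotateexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/secretsmanager"
	"github.com/aws/aws-sdk-go-v2/service/secretsmanager/types"
)

// startRotation wires a Lambda rotation function to a secret and defers
// the first rotation to the scheduled window. ARNs and the schedule are
// placeholders.
func startRotation(ctx context.Context, client *secretsmanager.Client) error {
	_, err := client.RotateSecret(ctx, &secretsmanager.RotateSecretInput{
		SecretId:          aws.String("my-app/db-credentials"),
		RotationLambdaARN: aws.String("arn:aws:lambda:us-east-1:123456789012:function:rotate-db-credentials"),
		RotationRules: &types.RotationRulesType{
			ScheduleExpression: aws.String("rate(30 days)"),
		},
		// false = wait for the next window; Secrets Manager only runs the
		// testSecret step now. Omitting the field rotates immediately.
		RotateImmediately: aws.Bool(false),
	})
	return err
}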
You must call this -// operation from the Region in which you want to promote the replica to a primary -// secret. Secrets Manager generates a CloudTrail log entry when you call this -// action. Do not include sensitive information in request parameters because it -// might be logged. For more information, see Logging Secrets Manager events with -// CloudTrail (https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html) -// . Required permissions: secretsmanager:StopReplicationToReplica . For more -// information, see IAM policy actions for Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions) -// and Authentication and access control in Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html) -// . +// the replica to a primary secret in the replica Region. +// +// You must call this operation from the Region in which you want to promote the +// replica to a primary secret. +// +// Secrets Manager generates a CloudTrail log entry when you call this action. Do +// not include sensitive information in request parameters because it might be +// logged. For more information, see [Logging Secrets Manager events with CloudTrail]. +// +// Required permissions: secretsmanager:StopReplicationToReplica . For more +// information, see [IAM policy actions for Secrets Manager]and [Authentication and access control in Secrets Manager]. +// +// [Authentication and access control in Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html +// [Logging Secrets Manager events with CloudTrail]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html +// [IAM policy actions for Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions func (c *Client) StopReplicationToReplica(ctx context.Context, params *StopReplicationToReplicaInput, optFns ...func(*Options)) (*StopReplicationToReplicaOutput, error) { if params == nil { params = &StopReplicationToReplicaInput{} @@ -113,6 +118,12 @@ func (c *Client) addOperationStopReplicationToReplicaMiddlewares(stack *middlewa if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpStopReplicationToReplicaValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_TagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_TagResource.go index 774df63f302..16a9cbceeb7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_TagResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_TagResource.go @@ -13,20 +13,27 @@ import ( // Attaches tags to a secret. Tags consist of a key name and a value. Tags are // part of the secret's metadata. They are not associated with specific versions of -// the secret. This operation appends tags to the existing list of tags. For tag -// quotas and naming restrictions, see Service quotas for Tagging (https://docs.aws.amazon.com/general/latest/gr/arg.html#taged-reference-quotas) -// in the Amazon Web Services General Reference guide. 
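StopReplicationToReplica needs nothing beyond the secret identifier, but it must be issued against the replica's Region, as the text above stresses. A minimal, hedged sketch with a placeholder secret name:

package promoteexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/secretsmanager"
)

// promoteReplica breaks the link to the primary secret. The client passed
// in must be configured for the replica's Region, not the primary's.
func promoteReplica(ctx context.Context, client *secretsmanager.Client) error {
	_, err := client.StopReplicationToReplica(ctx, &secretsmanager.StopReplicationToReplicaInput{
		SecretId: aws.String("my-app/db-credentials"),
	})
	return err
}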
If you use tags as part of -// your security strategy, then adding or removing a tag can change permissions. If -// successfully completing this operation would result in you losing your -// permissions for this secret, then the operation is blocked and returns an Access -// Denied error. Secrets Manager generates a CloudTrail log entry when you call -// this action. Do not include sensitive information in request parameters because -// it might be logged. For more information, see Logging Secrets Manager events -// with CloudTrail (https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html) -// . Required permissions: secretsmanager:TagResource . For more information, see -// IAM policy actions for Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions) -// and Authentication and access control in Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html) -// . +// the secret. This operation appends tags to the existing list of tags. +// +// For tag quotas and naming restrictions, see [Service quotas for Tagging] in the Amazon Web Services General +// Reference guide. +// +// If you use tags as part of your security strategy, then adding or removing a +// tag can change permissions. If successfully completing this operation would +// result in you losing your permissions for this secret, then the operation is +// blocked and returns an Access Denied error. +// +// Secrets Manager generates a CloudTrail log entry when you call this action. Do +// not include sensitive information in request parameters because it might be +// logged. For more information, see [Logging Secrets Manager events with CloudTrail]. +// +// Required permissions: secretsmanager:TagResource . For more information, see [IAM policy actions for Secrets Manager] +// and [Authentication and access control in Secrets Manager]. +// +// [Authentication and access control in Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html +// [Logging Secrets Manager events with CloudTrail]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html +// [IAM policy actions for Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions +// [Service quotas for Tagging]: https://docs.aws.amazon.com/general/latest/gr/arg.html#taged-reference-quotas func (c *Client) TagResource(ctx context.Context, params *TagResourceInput, optFns ...func(*Options)) (*TagResourceOutput, error) { if params == nil { params = &TagResourceInput{} @@ -45,20 +52,24 @@ func (c *Client) TagResource(ctx context.Context, params *TagResourceInput, optF type TagResourceInput struct { // The identifier for the secret to attach tags to. You can specify either the - // Amazon Resource Name (ARN) or the friendly name of the secret. For an ARN, we - // recommend that you specify a complete ARN rather than a partial ARN. See - // Finding a secret from a partial ARN (https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot.html#ARN_secretnamehyphen) - // . + // Amazon Resource Name (ARN) or the friendly name of the secret. + // + // For an ARN, we recommend that you specify a complete ARN rather than a partial + // ARN. See [Finding a secret from a partial ARN]. 
+ // + // [Finding a secret from a partial ARN]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot.html#ARN_secretnamehyphen // // This member is required. SecretId *string // The tags to attach to the secret as a JSON text string argument. Each element - // in the list consists of a Key and a Value . For storing multiple values, we - // recommend that you use a JSON text string argument and specify key/value pairs. - // For more information, see Specifying parameter values for the Amazon Web - // Services CLI (https://docs.aws.amazon.com/cli/latest/userguide/cli-usage-parameters.html) - // in the Amazon Web Services CLI User Guide. + // in the list consists of a Key and a Value . + // + // For storing multiple values, we recommend that you use a JSON text string + // argument and specify key/value pairs. For more information, see [Specifying parameter values for the Amazon Web Services CLI]in the Amazon + // Web Services CLI User Guide. + // + // [Specifying parameter values for the Amazon Web Services CLI]: https://docs.aws.amazon.com/cli/latest/userguide/cli-usage-parameters.html // // This member is required. Tags []types.Tag @@ -128,6 +139,12 @@ func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, opt if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpTagResourceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_UntagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_UntagResource.go index faabca5ea97..904a18dd2b5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_UntagResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_UntagResource.go @@ -10,19 +10,26 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Removes specific tags from a secret. This operation is idempotent. If a -// requested tag is not attached to the secret, no error is returned and the secret -// metadata is unchanged. If you use tags as part of your security strategy, then -// removing a tag can change permissions. If successfully completing this operation -// would result in you losing your permissions for this secret, then the operation -// is blocked and returns an Access Denied error. Secrets Manager generates a -// CloudTrail log entry when you call this action. Do not include sensitive -// information in request parameters because it might be logged. For more -// information, see Logging Secrets Manager events with CloudTrail (https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html) -// . Required permissions: secretsmanager:UntagResource . For more information, see -// IAM policy actions for Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions) -// and Authentication and access control in Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html) -// . +// Removes specific tags from a secret. +// +// This operation is idempotent. If a requested tag is not attached to the secret, +// no error is returned and the secret metadata is unchanged. 
+// +// If you use tags as part of your security strategy, then removing a tag can +// change permissions. If successfully completing this operation would result in +// you losing your permissions for this secret, then the operation is blocked and +// returns an Access Denied error. +// +// Secrets Manager generates a CloudTrail log entry when you call this action. Do +// not include sensitive information in request parameters because it might be +// logged. For more information, see [Logging Secrets Manager events with CloudTrail]. +// +// Required permissions: secretsmanager:UntagResource . For more information, see [IAM policy actions for Secrets Manager] +// and [Authentication and access control in Secrets Manager]. +// +// [Authentication and access control in Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html +// [Logging Secrets Manager events with CloudTrail]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html +// [IAM policy actions for Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions func (c *Client) UntagResource(ctx context.Context, params *UntagResourceInput, optFns ...func(*Options)) (*UntagResourceOutput, error) { if params == nil { params = &UntagResourceInput{} @@ -40,19 +47,26 @@ func (c *Client) UntagResource(ctx context.Context, params *UntagResourceInput, type UntagResourceInput struct { - // The ARN or name of the secret. For an ARN, we recommend that you specify a - // complete ARN rather than a partial ARN. See Finding a secret from a partial ARN (https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot.html#ARN_secretnamehyphen) - // . + // The ARN or name of the secret. + // + // For an ARN, we recommend that you specify a complete ARN rather than a partial + // ARN. See [Finding a secret from a partial ARN]. + // + // [Finding a secret from a partial ARN]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot.html#ARN_secretnamehyphen // // This member is required. SecretId *string // A list of tag key names to remove from the secret. You don't specify the value. - // Both the key and its associated value are removed. This parameter requires a - // JSON text string argument. For storing multiple values, we recommend that you - // use a JSON text string argument and specify key/value pairs. For more - // information, see Specifying parameter values for the Amazon Web Services CLI (https://docs.aws.amazon.com/cli/latest/userguide/cli-usage-parameters.html) - // in the Amazon Web Services CLI User Guide. + // Both the key and its associated value are removed. + // + // This parameter requires a JSON text string argument. + // + // For storing multiple values, we recommend that you use a JSON text string + // argument and specify key/value pairs. For more information, see [Specifying parameter values for the Amazon Web Services CLI]in the Amazon + // Web Services CLI User Guide. + // + // [Specifying parameter values for the Amazon Web Services CLI]: https://docs.aws.amazon.com/cli/latest/userguide/cli-usage-parameters.html // // This member is required. 
TagKeys []string @@ -122,6 +136,12 @@ func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, o if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpUntagResourceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_UpdateSecret.go b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_UpdateSecret.go index a18d03351c7..58f3741767e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_UpdateSecret.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_UpdateSecret.go @@ -11,37 +11,52 @@ import ( ) // Modifies the details of a secret, including metadata and the secret value. To -// change the secret value, you can also use PutSecretValue . To change the -// rotation configuration of a secret, use RotateSecret instead. To change a -// secret so that it is managed by another service, you need to recreate the secret -// in that service. See Secrets Manager secrets managed by other Amazon Web -// Services services (https://docs.aws.amazon.com/secretsmanager/latest/userguide/service-linked-secrets.html) -// . We recommend you avoid calling UpdateSecret at a sustained rate of more than +// change the secret value, you can also use PutSecretValue. +// +// To change the rotation configuration of a secret, use RotateSecret instead. +// +// To change a secret so that it is managed by another service, you need to +// recreate the secret in that service. See [Secrets Manager secrets managed by other Amazon Web Services services]. +// +// We recommend you avoid calling UpdateSecret at a sustained rate of more than // once every 10 minutes. When you call UpdateSecret to update the secret value, // Secrets Manager creates a new version of the secret. Secrets Manager removes // outdated versions when there are more than 100, but it does not remove versions // created less than 24 hours ago. If you update the secret value more than once // every 10 minutes, you create more versions than Secrets Manager removes, and you -// will reach the quota for secret versions. If you include SecretString or -// SecretBinary to create a new secret version, Secrets Manager automatically moves -// the staging label AWSCURRENT to the new version. Then it attaches the label -// AWSPREVIOUS to the version that AWSCURRENT was removed from. If you call this -// operation with a ClientRequestToken that matches an existing version's VersionId -// , the operation results in an error. You can't modify an existing version, you -// can only create a new version. To remove a version, remove all staging labels -// from it. See UpdateSecretVersionStage . Secrets Manager generates a CloudTrail -// log entry when you call this action. Do not include sensitive information in -// request parameters except SecretBinary or SecretString because it might be -// logged. For more information, see Logging Secrets Manager events with CloudTrail (https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html) -// . Required permissions: secretsmanager:UpdateSecret . 
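The TagResource and UntagResource inputs above reduce to a Tags slice and a TagKeys slice respectively. A hedged usage sketch, not part of the vendored SDK; the tag keys and values are placeholders, and tag changes can affect tag-based IAM permissions as noted in the documentation.

package tagexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/secretsmanager"
	"github.com/aws/aws-sdk-go-v2/service/secretsmanager/types"
)

// retag appends tags to a secret and removes an obsolete key.
func retag(ctx context.Context, client *secretsmanager.Client) error {
	_, err := client.TagResource(ctx, &secretsmanager.TagResourceInput{
		SecretId: aws.String("my-app/db-credentials"),
		Tags: []types.Tag{
			{Key: aws.String("team"), Value: aws.String("platform")},
			{Key: aws.String("env"), Value: aws.String("prod")},
		},
	})
	if err != nil {
		return err
	}

	// Only key names are supplied; the values are removed with them.
	_, err = client.UntagResource(ctx, &secretsmanager.UntagResourceInput{
		SecretId: aws.String("my-app/db-credentials"),
		TagKeys:  []string{"owner"},
	})
	return err
}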
For more information, see -// IAM policy actions for Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions) -// and Authentication and access control in Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html) -// . If you use a customer managed key, you must also have kms:GenerateDataKey , -// kms:Encrypt , and kms:Decrypt permissions on the key. If you change the KMS key -// and you don't have kms:Encrypt permission to the new key, Secrets Manager does -// not re-ecrypt existing secret versions with the new key. For more information, -// see Secret encryption and decryption (https://docs.aws.amazon.com/secretsmanager/latest/userguide/security-encryption.html) -// . +// will reach the quota for secret versions. +// +// If you include SecretString or SecretBinary to create a new secret version, +// Secrets Manager automatically moves the staging label AWSCURRENT to the new +// version. Then it attaches the label AWSPREVIOUS to the version that AWSCURRENT +// was removed from. +// +// If you call this operation with a ClientRequestToken that matches an existing +// version's VersionId , the operation results in an error. You can't modify an +// existing version, you can only create a new version. To remove a version, remove +// all staging labels from it. See UpdateSecretVersionStage. +// +// Secrets Manager generates a CloudTrail log entry when you call this action. Do +// not include sensitive information in request parameters except SecretBinary or +// SecretString because it might be logged. For more information, see [Logging Secrets Manager events with CloudTrail]. +// +// Required permissions: secretsmanager:UpdateSecret . For more information, see [IAM policy actions for Secrets Manager] +// and [Authentication and access control in Secrets Manager]. If you use a customer managed key, you must also have kms:GenerateDataKey +// , kms:Encrypt , and kms:Decrypt permissions on the key. If you change the KMS +// key and you don't have kms:Encrypt permission to the new key, Secrets Manager +// does not re-encrypt existing secret versions with the new key. For more +// information, see [Secret encryption and decryption]. +// +// When you enter commands in a command shell, there is a risk of the command +// history being accessed or utilities having access to your command parameters. +// This is a concern if the command includes the value of a secret. Learn how to [Mitigate the risks of using command-line tools to store Secrets Manager secrets]. 
+// +// [Authentication and access control in Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html +// [Logging Secrets Manager events with CloudTrail]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html +// [Secret encryption and decryption]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/security-encryption.html +// [Secrets Manager secrets managed by other Amazon Web Services services]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/service-linked-secrets.html +// [Mitigate the risks of using command-line tools to store Secrets Manager secrets]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/security_cli-exposure-risks.html +// [IAM policy actions for Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions func (c *Client) UpdateSecret(ctx context.Context, params *UpdateSecretInput, optFns ...func(*Options)) (*UpdateSecretOutput, error) { if params == nil { params = &UpdateSecretInput{} @@ -59,25 +74,34 @@ func (c *Client) UpdateSecret(ctx context.Context, params *UpdateSecretInput, op type UpdateSecretInput struct { - // The ARN or name of the secret. For an ARN, we recommend that you specify a - // complete ARN rather than a partial ARN. See Finding a secret from a partial ARN (https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot.html#ARN_secretnamehyphen) - // . + // The ARN or name of the secret. + // + // For an ARN, we recommend that you specify a complete ARN rather than a partial + // ARN. See [Finding a secret from a partial ARN]. + // + // [Finding a secret from a partial ARN]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot.html#ARN_secretnamehyphen // // This member is required. SecretId *string // If you include SecretString or SecretBinary , then Secrets Manager creates a new // version for the secret, and this parameter specifies the unique identifier for - // the new version. If you use the Amazon Web Services CLI or one of the Amazon Web - // Services SDKs to call this operation, then you can leave this parameter empty. - // The CLI or SDK generates a random UUID for you and includes it as the value for - // this parameter in the request. If you generate a raw HTTP request to the Secrets - // Manager service endpoint, then you must generate a ClientRequestToken and - // include it in the request. This value helps ensure idempotency. Secrets Manager - // uses this value to prevent the accidental creation of duplicate versions if - // there are failures and retries during a rotation. We recommend that you generate - // a UUID-type (https://wikipedia.org/wiki/Universally_unique_identifier) value to - // ensure uniqueness of your versions within the specified secret. + // the new version. + // + // If you use the Amazon Web Services CLI or one of the Amazon Web Services SDKs + // to call this operation, then you can leave this parameter empty. The CLI or SDK + // generates a random UUID for you and includes it as the value for this parameter + // in the request. + // + // If you generate a raw HTTP request to the Secrets Manager service endpoint, + // then you must generate a ClientRequestToken and include it in the request. + // + // This value helps ensure idempotency. Secrets Manager uses this value to prevent + // the accidental creation of duplicate versions if there are failures and retries + // during a rotation. 
We recommend that you generate a [UUID-type]value to ensure uniqueness + // of your versions within the specified secret. + // + // [UUID-type]: https://wikipedia.org/wiki/Universally_unique_identifier ClientRequestToken *string // The description of the secret. @@ -86,35 +110,52 @@ type UpdateSecretInput struct { // The ARN, key ID, or alias of the KMS key that Secrets Manager uses to encrypt // new secret versions as well as any existing versions with the staging labels // AWSCURRENT , AWSPENDING , or AWSPREVIOUS . If you don't have kms:Encrypt - // permission to the new key, Secrets Manager does not re-ecrypt existing secret + // permission to the new key, Secrets Manager does not re-encrypt existing secret // versions with the new key. For more information about versions and staging - // labels, see Concepts: Version (https://docs.aws.amazon.com/secretsmanager/latest/userguide/getting-started.html#term_version) - // . A key alias is always prefixed by alias/ , for example - // alias/aws/secretsmanager . For more information, see About aliases (https://docs.aws.amazon.com/kms/latest/developerguide/alias-about.html) - // . If you set this to an empty string, Secrets Manager uses the Amazon Web + // labels, see [Concepts: Version]. + // + // A key alias is always prefixed by alias/ , for example alias/aws/secretsmanager + // . For more information, see [About aliases]. + // + // If you set this to an empty string, Secrets Manager uses the Amazon Web // Services managed key aws/secretsmanager . If this key doesn't already exist in // your account, then Secrets Manager creates it for you automatically. All users // and roles in the Amazon Web Services account automatically have access to use // aws/secretsmanager . Creating aws/secretsmanager can result in a one-time - // significant delay in returning the result. You can only use the Amazon Web - // Services managed key aws/secretsmanager if you call this operation using - // credentials from the same Amazon Web Services account that owns the secret. If - // the secret is in a different account, then you must use a customer managed key - // and provide the ARN of that KMS key in this field. The user making the call must - // have permissions to both the secret and the KMS key in their respective - // accounts. + // significant delay in returning the result. + // + // You can only use the Amazon Web Services managed key aws/secretsmanager if you + // call this operation using credentials from the same Amazon Web Services account + // that owns the secret. If the secret is in a different account, then you must use + // a customer managed key and provide the ARN of that KMS key in this field. The + // user making the call must have permissions to both the secret and the KMS key in + // their respective accounts. + // + // [Concepts: Version]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/getting-started.html#term_version + // [About aliases]: https://docs.aws.amazon.com/kms/latest/developerguide/alias-about.html KmsKeyId *string // The binary data to encrypt and store in the new version of the secret. We // recommend that you store your binary data in a file and then pass the contents - // of the file as a parameter. Either SecretBinary or SecretString must have a - // value, but not both. You can't access this parameter in the Secrets Manager - // console. + // of the file as a parameter. + // + // Either SecretBinary or SecretString must have a value, but not both. 
+ // + // You can't access this parameter in the Secrets Manager console. + // + // Sensitive: This field contains sensitive information, so the service does not + // include it in CloudTrail log entries. If you create your own log entries, you + // must also avoid logging the information in this field. SecretBinary []byte // The text data to encrypt and store in the new version of the secret. We // recommend you use a JSON structure of key/value pairs for your secret value. + // // Either SecretBinary or SecretString must have a value, but not both. + // + // Sensitive: This field contains sensitive information, so the service does not + // include it in CloudTrail log entries. If you create your own log entries, you + // must also avoid logging the information in this field. SecretString *string noSmithyDocumentSerde @@ -193,6 +234,12 @@ func (c *Client) addOperationUpdateSecretMiddlewares(stack *middleware.Stack, op if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opUpdateSecretMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_UpdateSecretVersionStage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_UpdateSecretVersionStage.go index 922bb7fe109..b80350eb478 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_UpdateSecretVersionStage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_UpdateSecretVersionStage.go @@ -16,21 +16,31 @@ import ( // time. To add a staging label to a version when it is already attached to another // version, Secrets Manager first removes it from the other version first and then // attaches it to this one. For more information about versions and staging labels, -// see Concepts: Version (https://docs.aws.amazon.com/secretsmanager/latest/userguide/getting-started.html#term_version) -// . The staging labels that you specify in the VersionStage parameter are added -// to the existing list of staging labels for the version. You can move the -// AWSCURRENT staging label to this version by including it in this call. Whenever -// you move AWSCURRENT , Secrets Manager automatically moves the label AWSPREVIOUS -// to the version that AWSCURRENT was removed from. If this action results in the -// last label being removed from a version, then the version is considered to be -// 'deprecated' and can be deleted by Secrets Manager. Secrets Manager generates a -// CloudTrail log entry when you call this action. Do not include sensitive -// information in request parameters because it might be logged. For more -// information, see Logging Secrets Manager events with CloudTrail (https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html) -// . Required permissions: secretsmanager:UpdateSecretVersionStage . For more -// information, see IAM policy actions for Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions) -// and Authentication and access control in Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html) -// . +// see [Concepts: Version]. 
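UpdateSecret can change metadata and the secret value in a single call, creating a new version as described above. An illustrative sketch under the usual caveats: the KMS alias, description, and secret value are placeholders, and because SecretString is set, SecretBinary must be omitted.

package updateexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/secretsmanager"
)

// updateMetadataAndValue changes the description and KMS key and writes a
// new secret value in one call, which creates a new version.
func updateMetadataAndValue(ctx context.Context, client *secretsmanager.Client) error {
	_, err := client.UpdateSecret(ctx, &secretsmanager.UpdateSecretInput{
		SecretId:     aws.String("my-app/db-credentials"),
		Description:  aws.String("Credentials for the orders database"),
		KmsKeyId:     aws.String("alias/my-app-secrets"), // customer managed key (placeholder alias)
		SecretString: aws.String(`{"username":"admin","password":"rotated-s3cr3t"}`),
	})
	return err
}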
+// +// The staging labels that you specify in the VersionStage parameter are added to +// the existing list of staging labels for the version. +// +// You can move the AWSCURRENT staging label to this version by including it in +// this call. +// +// Whenever you move AWSCURRENT , Secrets Manager automatically moves the label +// AWSPREVIOUS to the version that AWSCURRENT was removed from. +// +// If this action results in the last label being removed from a version, then the +// version is considered to be 'deprecated' and can be deleted by Secrets Manager. +// +// Secrets Manager generates a CloudTrail log entry when you call this action. Do +// not include sensitive information in request parameters because it might be +// logged. For more information, see [Logging Secrets Manager events with CloudTrail]. +// +// Required permissions: secretsmanager:UpdateSecretVersionStage . For more +// information, see [IAM policy actions for Secrets Manager]and [Authentication and access control in Secrets Manager]. +// +// [Authentication and access control in Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html +// [Logging Secrets Manager events with CloudTrail]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html +// [Concepts: Version]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/getting-started.html#term_version +// [IAM policy actions for Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions func (c *Client) UpdateSecretVersionStage(ctx context.Context, params *UpdateSecretVersionStageInput, optFns ...func(*Options)) (*UpdateSecretVersionStageOutput, error) { if params == nil { params = &UpdateSecretVersionStageInput{} @@ -49,9 +59,11 @@ func (c *Client) UpdateSecretVersionStage(ctx context.Context, params *UpdateSec type UpdateSecretVersionStageInput struct { // The ARN or the name of the secret with the version and staging labelsto modify. + // // For an ARN, we recommend that you specify a complete ARN rather than a partial - // ARN. See Finding a secret from a partial ARN (https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot.html#ARN_secretnamehyphen) - // . + // ARN. See [Finding a secret from a partial ARN]. + // + // [Finding a secret from a partial ARN]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot.html#ARN_secretnamehyphen // // This member is required. SecretId *string @@ -62,9 +74,10 @@ type UpdateSecretVersionStageInput struct { VersionStage *string // The ID of the version to add the staging label to. To remove a label from a - // version, then do not specify this parameter. If the staging label is already - // attached to a different version of the secret, then you must also specify the - // RemoveFromVersionId parameter. + // version, then do not specify this parameter. + // + // If the staging label is already attached to a different version of the secret, + // then you must also specify the RemoveFromVersionId parameter. MoveToVersionId *string // The ID of the version that the staging label is to be removed from. 
If the @@ -147,6 +160,12 @@ func (c *Client) addOperationUpdateSecretVersionStageMiddlewares(stack *middlewa if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpUpdateSecretVersionStageValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_ValidateResourcePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_ValidateResourcePolicy.go index ac161518371..fb1d6c66657 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_ValidateResourcePolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/api_op_ValidateResourcePolicy.go @@ -12,23 +12,29 @@ import ( ) // Validates that a resource policy does not grant a wide range of principals -// access to your secret. A resource-based policy is optional for secrets. The API -// performs three checks when validating the policy: -// - Sends a call to Zelkova (https://aws.amazon.com/blogs/security/protect-sensitive-data-in-the-cloud-with-automated-reasoning-zelkova/) -// , an automated reasoning engine, to ensure your resource policy does not allow -// broad access to your secret, for example policies that use a wildcard for the -// principal. +// access to your secret. A resource-based policy is optional for secrets. +// +// The API performs three checks when validating the policy: +// +// - Sends a call to [Zelkova], an automated reasoning engine, to ensure your resource +// policy does not allow broad access to your secret, for example policies that use +// a wildcard for the principal. +// // - Checks for correct syntax in a policy. +// // - Verifies the policy does not lock out a caller. // // Secrets Manager generates a CloudTrail log entry when you call this action. Do // not include sensitive information in request parameters because it might be -// logged. For more information, see Logging Secrets Manager events with CloudTrail (https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html) -// . Required permissions: secretsmanager:ValidateResourcePolicy and -// secretsmanager:PutResourcePolicy . For more information, see IAM policy -// actions for Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions) -// and Authentication and access control in Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html) -// . +// logged. For more information, see [Logging Secrets Manager events with CloudTrail]. +// +// Required permissions: secretsmanager:ValidateResourcePolicy and +// secretsmanager:PutResourcePolicy . For more information, see [IAM policy actions for Secrets Manager] and [Authentication and access control in Secrets Manager]. 
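Moving staging labels with UpdateSecretVersionStage looks like this in practice. A hedged sketch, not taken from the patch: the version IDs are placeholders, and supplying RemoveFromVersionId mirrors the requirement described above when the label is already attached to another version.

package stageexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/secretsmanager"
)

// promoteVersion moves the AWSCURRENT label to a new version; Secrets
// Manager then moves AWSPREVIOUS to the version the label came from.
func promoteVersion(ctx context.Context, client *secretsmanager.Client) error {
	_, err := client.UpdateSecretVersionStage(ctx, &secretsmanager.UpdateSecretVersionStageInput{
		SecretId:            aws.String("my-app/db-credentials"),
		VersionStage:        aws.String("AWSCURRENT"),
		MoveToVersionId:     aws.String("EXAMPLE1-90ab-cdef-fedc-ba987EXAMPLE"),
		RemoveFromVersionId: aws.String("EXAMPLE2-90ab-cdef-fedc-ba987EXAMPLE"),
	})
	return err
}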
+// +// [Authentication and access control in Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html +// [Logging Secrets Manager events with CloudTrail]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html +// [IAM policy actions for Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions +// [Zelkova]: https://aws.amazon.com/blogs/security/protect-sensitive-data-in-the-cloud-with-automated-reasoning-zelkova/ func (c *Client) ValidateResourcePolicy(ctx context.Context, params *ValidateResourcePolicyInput, optFns ...func(*Options)) (*ValidateResourcePolicyOutput, error) { if params == nil { params = &ValidateResourcePolicyInput{} @@ -48,13 +54,15 @@ type ValidateResourcePolicyInput struct { // A JSON-formatted string that contains an Amazon Web Services resource-based // policy. The policy in the string identifies who can access or manage this secret - // and its versions. For example policies, see Permissions policy examples (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_examples.html) - // . + // and its versions. For example policies, see [Permissions policy examples]. + // + // [Permissions policy examples]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_examples.html // // This member is required. ResourcePolicy *string - // This field is reserved for internal use. + // The ARN or name of the secret with the resource-based policy you want to + // validate. SecretId *string noSmithyDocumentSerde @@ -129,6 +137,12 @@ func (c *Client) addOperationValidateResourcePolicyMiddlewares(stack *middleware if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpValidateResourcePolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/auth.go index ff911710d8d..0b1d10070dd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/auth.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/auth.go @@ -12,7 +12,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -func bindAuthParamsRegion(params *AuthResolverParameters, _ interface{}, options Options) { +func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { params.Region = options.Region } @@ -90,12 +90,12 @@ type AuthResolverParameters struct { Region string } -func bindAuthResolverParams(operation string, input interface{}, options Options) *AuthResolverParameters { +func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { params := &AuthResolverParameters{ Operation: operation, } - bindAuthParamsRegion(params, input, options) + bindAuthParamsRegion(ctx, params, input, options) return params } @@ -145,7 +145,7 @@ func (*resolveAuthSchemeMiddleware) ID() string { func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - params := bindAuthResolverParams(m.operation, 
getOperationInput(ctx), m.options) + params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) if err != nil { return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/deserializers.go index 82fd102c2ad..7873fb62a6d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/deserializers.go @@ -19,8 +19,17 @@ import ( "io" "io/ioutil" "strings" + "time" ) +func deserializeS3Expires(v string) (*time.Time, error) { + t, err := smithytime.ParseHTTPDate(v) + if err != nil { + return nil, nil + } + return &t, nil +} + type awsAwsjson11_deserializeOpBatchGetSecretValue struct { } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/doc.go index e3e68d833d0..2d0ba2e6562 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/doc.go @@ -3,29 +3,43 @@ // Package secretsmanager provides the API client, operations, and parameter types // for AWS Secrets Manager. // -// Amazon Web Services Secrets Manager Amazon Web Services Secrets Manager -// provides a service to enable you to store, manage, and retrieve, secrets. This -// guide provides descriptions of the Secrets Manager API. For more information -// about using this service, see the Amazon Web Services Secrets Manager User Guide (https://docs.aws.amazon.com/secretsmanager/latest/userguide/introduction.html) -// . API Version This version of the Secrets Manager API Reference documents the -// Secrets Manager API version 2017-10-17. For a list of endpoints, see Amazon Web -// Services Secrets Manager endpoints (https://docs.aws.amazon.com/secretsmanager/latest/userguide/asm_access.html#endpoints) -// . Support and Feedback for Amazon Web Services Secrets Manager We welcome your -// feedback. Send your comments to awssecretsmanager-feedback@amazon.com (mailto:awssecretsmanager-feedback@amazon.com) -// , or post your feedback and questions in the Amazon Web Services Secrets -// Manager Discussion Forum (http://forums.aws.amazon.com/forum.jspa?forumID=296) . -// For more information about the Amazon Web Services Discussion Forums, see -// Forums Help (http://forums.aws.amazon.com/help.jspa) . Logging API Requests +// # Amazon Web Services Secrets Manager +// +// Amazon Web Services Secrets Manager provides a service to enable you to store, +// manage, and retrieve, secrets. +// +// This guide provides descriptions of the Secrets Manager API. For more +// information about using this service, see the [Amazon Web Services Secrets Manager User Guide]. +// +// # API Version +// +// This version of the Secrets Manager API Reference documents the Secrets Manager +// API version 2017-10-17. +// +// For a list of endpoints, see [Amazon Web Services Secrets Manager endpoints]. +// +// # Support and Feedback for Amazon Web Services Secrets Manager +// +// We welcome your feedback. Send your comments to awssecretsmanager-feedback@amazon.com, or post your feedback and +// questions in the [Amazon Web Services Secrets Manager Discussion Forum]. For more information about the Amazon Web Services +// Discussion Forums, see [Forums Help]. 
+// +// # Logging API Requests +// // Amazon Web Services Secrets Manager supports Amazon Web Services CloudTrail, a // service that records Amazon Web Services API calls for your Amazon Web Services // account and delivers log files to an Amazon S3 bucket. By using information // that's collected by Amazon Web Services CloudTrail, you can determine the // requests successfully made to Secrets Manager, who made the request, when it was // made, and so on. For more about Amazon Web Services Secrets Manager and support -// for Amazon Web Services CloudTrail, see Logging Amazon Web Services Secrets -// Manager Events with Amazon Web Services CloudTrail (https://docs.aws.amazon.com/secretsmanager/latest/userguide/monitoring.html#monitoring_cloudtrail) -// in the Amazon Web Services Secrets Manager User Guide. To learn more about -// CloudTrail, including enabling it and find your log files, see the Amazon Web -// Services CloudTrail User Guide (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html) -// . +// for Amazon Web Services CloudTrail, see [Logging Amazon Web Services Secrets Manager Events with Amazon Web Services CloudTrail]in the Amazon Web Services Secrets +// Manager User Guide. To learn more about CloudTrail, including enabling it and +// find your log files, see the [Amazon Web Services CloudTrail User Guide]. +// +// [Forums Help]: http://forums.aws.amazon.com/help.jspa +// [Amazon Web Services CloudTrail User Guide]: https://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html +// [Amazon Web Services Secrets Manager endpoints]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/asm_access.html#endpoints +// [Amazon Web Services Secrets Manager User Guide]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/introduction.html +// [Amazon Web Services Secrets Manager Discussion Forum]: http://forums.aws.amazon.com/forum.jspa?forumID=296 +// [Logging Amazon Web Services Secrets Manager Events with Amazon Web Services CloudTrail]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/monitoring.html#monitoring_cloudtrail package secretsmanager diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/endpoints.go index ce51d643984..b1ab339fb02 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/endpoints.go @@ -288,6 +288,17 @@ func (p EndpointParameters) WithDefaults() EndpointParameters { return p } +type stringSlice []string + +func (s stringSlice) Get(i int) *string { + if i < 0 || i >= len(s) { + return nil + } + + v := s[i] + return &v +} + // EndpointResolverV2 provides the interface for resolving service endpoints. 
type EndpointResolverV2 interface { // ResolveEndpoint attempts to resolve the endpoint with the provided options, @@ -541,7 +552,7 @@ type endpointParamsBinder interface { bindEndpointParams(*EndpointParameters) } -func bindEndpointParams(input interface{}, options Options) *EndpointParameters { +func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { params := &EndpointParameters{} params.Region = bindRegion(options.Region) @@ -571,6 +582,10 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return next.HandleFinalize(ctx, in) } + if err := checkAccountID(getIdentity(ctx), m.options.AccountIDEndpointMode); err != nil { + return out, metadata, fmt.Errorf("invalid accountID set: %w", err) + } + req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) @@ -580,7 +595,7 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") } - params := bindEndpointParams(getOperationInput(ctx), m.options) + params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/go_module_metadata.go index 461158f9bab..385a5b74844 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/go_module_metadata.go @@ -3,4 +3,4 @@ package secretsmanager // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.28.6" +const goModuleVersion = "1.32.4" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/options.go index a8817523e98..3d6b11409c2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/options.go @@ -24,6 +24,9 @@ type Options struct { // modify this list for per operation behavior. APIOptions []func(*middleware.Stack) error + // Indicates how aws account ID is applied in endpoint2.0 routing + AccountIDEndpointMode aws.AccountIDEndpointMode + // The optional application specific identifier appended to the User-Agent header. AppID string @@ -50,8 +53,10 @@ type Options struct { // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a // value for this field will likely prevent you from using any endpoint-related // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom - // endpoint, set the client option BaseEndpoint instead. + // BaseEndpoint. + // + // To migrate an EndpointResolver implementation that uses a custom endpoint, set + // the client option BaseEndpoint instead. EndpointResolver EndpointResolver // Resolves the endpoint used for a particular service operation. This should be @@ -74,17 +79,20 @@ type Options struct { // RetryMaxAttempts specifies the maximum number attempts an API client will call // an operation that fails with a retryable error. 
A value of 0 is ignored, and // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. If specified in an operation call's - // functional options with a value that is different than the constructed client's - // Options, the Client's Retryer will be wrapped to use the operation's specific - // RetryMaxAttempts value. + // per operation call's retry max attempts. + // + // If specified in an operation call's functional options with a value that is + // different than the constructed client's Options, the Client's Retryer will be + // wrapped to use the operation's specific RetryMaxAttempts value. RetryMaxAttempts int // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. Currently does not support per operation call - // overrides, may in the future. + // Retryer option is not also specified. + // + // When creating a new API Clients this member will only be used if the Retryer + // Options member is nil. This value will be ignored if Retryer is not nil. + // + // Currently does not support per operation call overrides, may in the future. RetryMode aws.RetryMode // Retryer guides how HTTP requests should be retried in case of recoverable @@ -101,8 +109,9 @@ type Options struct { // The initial DefaultsMode used when the client options were constructed. If the // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. Currently does not support per operation call - // overrides, may in the future. + // value was at that point in time. + // + // Currently does not support per operation call overrides, may in the future. resolvedDefaultsMode aws.DefaultsMode // The HTTP client to invoke API calls with. Defaults to client's default HTTP @@ -147,6 +156,7 @@ func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { // Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for // this field will likely prevent you from using any endpoint-related service // features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// // To migrate an EndpointResolver implementation that uses a custom endpoint, set // the client option BaseEndpoint instead. 
func WithEndpointResolver(v EndpointResolver) func(*Options) { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/serializers.go index 564feb18664..2ca3bbf3e58 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/serializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/serializers.go @@ -1766,6 +1766,11 @@ func awsAwsjson11_serializeOpDocumentPutSecretValueInput(v *PutSecretValueInput, ok.String(*v.ClientRequestToken) } + if v.RotationToken != nil { + ok := object.Key("RotationToken") + ok.String(*v.RotationToken) + } + if v.SecretBinary != nil { ok := object.Key("SecretBinary") ok.Base64EncodeBytes(v.SecretBinary) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/types/enums.go index 84f46d0bd5d..e458ec8fecd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/types/enums.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/types/enums.go @@ -16,8 +16,9 @@ const ( ) // Values returns all known values for FilterNameStringType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (FilterNameStringType) Values() []FilterNameStringType { return []FilterNameStringType{ "description", @@ -39,8 +40,9 @@ const ( ) // Values returns all known values for SortOrderType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (SortOrderType) Values() []SortOrderType { return []SortOrderType{ "asc", @@ -58,8 +60,9 @@ const ( ) // Values returns all known values for StatusType. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (StatusType) Values() []StatusType { return []StatusType{ "InSync", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/types/errors.go index aff130bb247..09c59ed19c2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/types/errors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/types/errors.go @@ -36,8 +36,9 @@ func (e *DecryptionFailure) ErrorFault() smithy.ErrorFault { return smithy.Fault // Secrets Manager can't encrypt the protected secret text using the provided KMS // key. Check that the KMS key is available, enabled, and not in an invalid state. -// For more information, see Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// . +// For more information, see [Key state: Effect on your KMS key]. 
+// +// [Key state: Effect on your KMS key]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html type EncryptionFailure struct { Message *string @@ -141,16 +142,20 @@ func (e *InvalidParameterException) ErrorCode() string { } func (e *InvalidParameterException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// A parameter value is not valid for the current state of the resource. Possible -// causes: +// A parameter value is not valid for the current state of the resource. +// +// Possible causes: +// // - The secret is scheduled for deletion. +// // - You tried to enable rotation on a secret that doesn't already have a Lambda // function ARN configured and you didn't include such an ARN as a parameter in // this call. +// // - The secret is managed by another service, and you must use that service to -// update it. For more information, see Secrets managed by other Amazon Web -// Services services (https://docs.aws.amazon.com/secretsmanager/latest/userguide/service-linked-secrets.html) -// . +// update it. For more information, see [Secrets managed by other Amazon Web Services services]. +// +// [Secrets managed by other Amazon Web Services services]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/service-linked-secrets.html type InvalidRequestException struct { Message *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/types/types.go index e04e17e788c..7b848975a34 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/secretsmanager/types/types.go @@ -8,13 +8,12 @@ import ( ) // The error Secrets Manager encountered while retrieving an individual secret as -// part of BatchGetSecretValue . +// part of BatchGetSecretValue. type APIErrorType struct { // The error Secrets Manager encountered while retrieving an individual secret as - // part of BatchGetSecretValue , for example ResourceNotFoundException , - // InvalidParameterException , InvalidRequestException , DecryptionFailure , or - // AccessDeniedException . + // part of BatchGetSecretValue, for example ResourceNotFoundException , InvalidParameterException , + // InvalidRequestException , DecryptionFailure , or AccessDeniedException . ErrorCode *string // A message describing the error. @@ -27,23 +26,33 @@ type APIErrorType struct { } // Allows you to add filters when you use the search function in Secrets Manager. -// For more information, see Find secrets in Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/manage_search-secret.html) -// . +// For more information, see [Find secrets in Secrets Manager]. +// +// [Find secrets in Secrets Manager]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/manage_search-secret.html type Filter struct { // The following are keys you can use: + // // - description: Prefix match, not case-sensitive. + // // - name: Prefix match, case-sensitive. + // // - tag-key: Prefix match, case-sensitive. + // // - tag-value: Prefix match, case-sensitive. + // // - primary-region: Prefix match, case-sensitive. + // // - owning-service: Prefix match, case-sensitive. + // // - all: Breaks the filter value string into words and then searches all // attributes for matches. Not case-sensitive. Key FilterNameStringType - // The keyword to filter for. You can prefix your search value with an exclamation - // mark ( ! 
) in order to perform negation filters. + // The keyword to filter for. + // + // You can prefix your search value with an exclamation mark ( ! ) in order to + // perform negation filters. Values []string noSmithyDocumentSerde @@ -56,8 +65,9 @@ type ReplicaRegionType struct { // include this field, Secrets Manager uses aws/secretsmanager . KmsKeyId *string - // A Region code. For a list of Region codes, see Name and code of Regions (https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints) - // . + // A Region code. For a list of Region codes, see [Name and code of Regions]. + // + // [Name and code of Regions]: https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints Region *string noSmithyDocumentSerde @@ -94,11 +104,13 @@ type RotationRulesType struct { // must be rotated. If you use this field to set the rotation schedule, Secrets // Manager calculates the next rotation date based on the previous rotation. // Manually updating the secret value by calling PutSecretValue or UpdateSecret is - // considered a valid rotation. In DescribeSecret and ListSecrets , this value is - // calculated from the rotation schedule after every successful rotation. In - // RotateSecret , you can set the rotation schedule in RotationRules with - // AutomaticallyAfterDays or ScheduleExpression , but not both. To set a rotation - // schedule in hours, use ScheduleExpression . + // considered a valid rotation. + // + // In DescribeSecret and ListSecrets , this value is calculated from the rotation + // schedule after every successful rotation. In RotateSecret , you can set the + // rotation schedule in RotationRules with AutomaticallyAfterDays or + // ScheduleExpression , but not both. To set a rotation schedule in hours, use + // ScheduleExpression . AutomaticallyAfterDays *int64 // The length of the rotation window in hours, for example 3h for a three hour @@ -107,39 +119,44 @@ type RotationRulesType struct { // window starts according to the ScheduleExpression . If you don't specify a // Duration , for a ScheduleExpression in hours, the window automatically closes // after one hour. For a ScheduleExpression in days, the window automatically - // closes at the end of the UTC day. For more information, including examples, see - // Schedule expressions in Secrets Manager rotation (https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotate-secrets_schedule.html) + // closes at the end of the UTC day. For more information, including examples, see [Schedule expressions in Secrets Manager rotation] // in the Secrets Manager Users Guide. + // + // [Schedule expressions in Secrets Manager rotation]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotate-secrets_schedule.html Duration *string // A cron() or rate() expression that defines the schedule for rotating your // secret. Secrets Manager rotation schedules use UTC time zone. Secrets Manager - // rotates your secret any time during a rotation window. Secrets Manager rate() - // expressions represent the interval in hours or days that you want to rotate your - // secret, for example rate(12 hours) or rate(10 days) . You can rotate a secret as - // often as every four hours. If you use a rate() expression, the rotation window - // starts at midnight. For a rate in hours, the default rotation window closes - // after one hour. For a rate in days, the default rotation window closes at the - // end of the day. You can set the Duration to change the rotation window. 
The - // rotation window must not extend into the next UTC day or into the next rotation - // window. You can use a cron() expression to create a rotation schedule that is - // more detailed than a rotation interval. For more information, including - // examples, see Schedule expressions in Secrets Manager rotation (https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotate-secrets_schedule.html) - // in the Secrets Manager Users Guide. For a cron expression that represents a + // rotates your secret any time during a rotation window. + // + // Secrets Manager rate() expressions represent the interval in hours or days that + // you want to rotate your secret, for example rate(12 hours) or rate(10 days) . + // You can rotate a secret as often as every four hours. If you use a rate() + // expression, the rotation window starts at midnight. For a rate in hours, the + // default rotation window closes after one hour. For a rate in days, the default + // rotation window closes at the end of the day. You can set the Duration to + // change the rotation window. The rotation window must not extend into the next + // UTC day or into the next rotation window. + // + // You can use a cron() expression to create a rotation schedule that is more + // detailed than a rotation interval. For more information, including examples, see + // [Schedule expressions in Secrets Manager rotation]in the Secrets Manager Users Guide. For a cron expression that represents a // schedule in hours, the default rotation window closes after one hour. For a cron // expression that represents a schedule in days, the default rotation window // closes at the end of the day. You can set the Duration to change the rotation // window. The rotation window must not extend into the next UTC day or into the // next rotation window. + // + // [Schedule expressions in Secrets Manager rotation]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotate-secrets_schedule.html ScheduleExpression *string noSmithyDocumentSerde } // A structure that contains the details about a secret. It does not include the -// encrypted SecretString and SecretBinary values. To get those values, use -// GetSecretValue (https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_GetSecretValue.html) -// . +// encrypted SecretString and SecretBinary values. To get those values, use [GetSecretValue] . +// +// [GetSecretValue]: https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_GetSecretValue.html type SecretListEntry struct { // The Amazon Resource Name (ARN) of the secret. @@ -150,9 +167,10 @@ type SecretListEntry struct { // The date and time the deletion of the secret occurred. Not present on active // secrets. The secret can be recovered until the number of days in the recovery - // window has passed, as specified in the RecoveryWindowInDays parameter of the - // DeleteSecret (https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_DeleteSecret.html) - // operation. + // window has passed, as specified in the RecoveryWindowInDays parameter of the [DeleteSecret] + // DeleteSecret operation. + // + // [DeleteSecret]: https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_DeleteSecret.html DeletedDate *time.Time // The user-provided description of the secret. 
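The reflowed RotationRulesType documentation above describes the two ways a rotation schedule is expressed: AutomaticallyAfterDays, or a rate()/cron() ScheduleExpression with an optional Duration window. As a rough, non-authoritative sketch of how these vendored fields are typically consumed, the snippet below rotates on a 10-day rate with a 3-hour window; RotateSecretInput, SecretId, and RotationLambdaARN are assumed from the wider Secrets Manager API surface rather than taken from this hunk, and the secret name and Lambda ARN are placeholders.

// Sketch only: exercises the RotationRulesType fields documented in the hunk
// above. Input field names outside RotationRulesType are assumptions.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/secretsmanager"
	"github.com/aws/aws-sdk-go-v2/service/secretsmanager/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := secretsmanager.NewFromConfig(cfg)

	// Rotate every 10 days; Secrets Manager may start rotation at any point
	// inside the 3h window, per the documentation above.
	_, err = client.RotateSecret(context.TODO(), &secretsmanager.RotateSecretInput{
		SecretId:          aws.String("my-secret"),                // placeholder secret name
		RotationLambdaARN: aws.String("arn:aws:lambda:placeholder"), // placeholder rotation function
		RotationRules: &types.RotationRulesType{
			ScheduleExpression: aws.String("rate(10 days)"),
			Duration:           aws.String("3h"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}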
@@ -193,8 +211,9 @@ type SecretListEntry struct { // The ARN of an Amazon Web Services Lambda function invoked by Secrets Manager to // rotate and expire the secret either automatically per the schedule or manually - // by a call to RotateSecret (https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_RotateSecret.html) - // . + // by a call to [RotateSecret]RotateSecret . + // + // [RotateSecret]: https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_RotateSecret.html RotationLambdaARN *string // A structure that defines the rotation configuration for the secret. @@ -202,15 +221,17 @@ type SecretListEntry struct { // A list of all of the currently assigned SecretVersionStage staging labels and // the SecretVersionId attached to each one. Staging labels are used to keep track - // of the different versions during the rotation process. A version that does not - // have any SecretVersionStage is considered deprecated and subject to deletion. - // Such versions are not included in this list. + // of the different versions during the rotation process. + // + // A version that does not have any SecretVersionStage is considered deprecated + // and subject to deletion. Such versions are not included in this list. SecretVersionsToStages map[string][]string // The list of user-defined tags associated with the secret. To add tags to a - // secret, use TagResource (https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_TagResource.html) - // . To remove tags, use UntagResource (https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_UntagResource.html) - // . + // secret, use [TagResource]TagResource . To remove tags, use [UntagResource]UntagResource . + // + // [TagResource]: https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_TagResource.html + // [UntagResource]: https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_UntagResource.html Tags []Tag noSmithyDocumentSerde @@ -230,7 +251,9 @@ type SecretValueEntry struct { // The decrypted secret value, if the secret value was originally provided as // binary data in the form of a byte array. The parameter represents the binary - // data as a base64-encoded (https://tools.ietf.org/html/rfc4648#section-4) string. + // data as a [base64-encoded]string. + // + // [base64-encoded]: https://tools.ietf.org/html/rfc4648#section-4 SecretBinary []byte // The decrypted secret value, if the secret value was originally provided as a diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/CHANGELOG.md index 517b98d50ec..c817608fbbd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/CHANGELOG.md @@ -1,3 +1,62 @@ +# v1.34.3 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.34.2 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.34.1 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.34.0 (2024-06-26) + +* **Feature**: Support list-of-string endpoint parameter. + +# v1.33.1 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.33.0 (2024-06-18) + +* **Feature**: Track usage of various AWS SDK features in user-agent string. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.7 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.6 (2024-06-07) + +* **Bug Fix**: Add clock skew correction on all service clients +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.5 (2024-06-06) + +* **Documentation**: Doc only updates for SQS. These updates include customer-reported issues and TCX3 modifications. + +# v1.32.4 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.3 (2024-05-23) + +* No change notes available for this release. + +# v1.32.2 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.1 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.0 (2024-05-08) + +* **Feature**: This release adds MessageSystemAttributeNames to ReceiveMessageRequest to replace AttributeNames. +* **Bug Fix**: GoDoc improvement + # v1.31.4 (2024-03-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_client.go index 9dd98599f8c..e6bf506c17b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_client.go @@ -14,13 +14,16 @@ import ( internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" smithydocument "github.com/aws/smithy-go/document" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" "net" "net/http" + "sync/atomic" "time" ) @@ -31,6 +34,9 @@ const ServiceAPIVersion = "2012-11-05" // Service. type Client struct { options Options + + // Difference between the time reported by the server and the client + timeOffset *atomic.Int64 } // New returns an initialized Client based on the functional options. Provide @@ -69,6 +75,8 @@ func New(options Options, optFns ...func(*Options)) *Client { options: options, } + initializeTimeOffsetResolver(client) + return client } @@ -230,15 +238,16 @@ func setResolvedDefaultsMode(o *Options) { // NewFromConfig returns a new client from the provided config. 
func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + AccountIDEndpointMode: cfg.AccountIDEndpointMode, } resolveAWSRetryerProvider(cfg, &opts) resolveAWSRetryMaxAttempts(cfg, &opts) @@ -442,6 +451,30 @@ func addContentSHA256Header(stack *middleware.Stack) error { return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) } +func addIsWaiterUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) + return nil + }) +} + +func addIsPaginatorUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) + return nil + }) +} + func addRetry(stack *middleware.Stack, o Options) error { attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { m.LogAttempts = o.ClientLogMode.IsRetries() @@ -485,6 +518,63 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { return nil } +func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { + if mode == aws.AccountIDEndpointModeDisabled { + return nil + } + + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { + return aws.String(ca.Credentials.AccountID) + } + + return nil +} + +func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { + mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} + if err := stack.Build.Add(&mw, middleware.After); err != nil { + return err + } + return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) +} +func initializeTimeOffsetResolver(c *Client) { + c.timeOffset = new(atomic.Int64) +} + +func checkAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) error { + switch mode { + case aws.AccountIDEndpointModeUnset: + case aws.AccountIDEndpointModePreferred: + case aws.AccountIDEndpointModeDisabled: + case aws.AccountIDEndpointModeRequired: + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); !ok { + return fmt.Errorf("accountID is required but not set") + } else if ca.Credentials.AccountID == "" { + return fmt.Errorf("accountID is required but not set") + } + // default check in case invalid mode is configured through request config + default: + return fmt.Errorf("invalid accountID endpoint mode %s, must be preferred/required/disabled", mode) + } + + return nil +} + +func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + switch options.Retryer.(type) { + case *retry.Standard: + 
ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) + case *retry.AdaptiveMode: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) + } + return nil +} + func addRecursionDetection(stack *middleware.Stack) error { return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_AddPermission.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_AddPermission.go index 52e7b957680..0577750aa0f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_AddPermission.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_AddPermission.go @@ -10,25 +10,32 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Adds a permission to a queue for a specific principal (https://docs.aws.amazon.com/general/latest/gr/glos-chap.html#P) -// . This allows sharing access to the queue. When you create a queue, you have -// full control access rights for the queue. Only you, the owner of the queue, can -// grant or deny permissions to the queue. For more information about these -// permissions, see Allow Developers to Write Messages to a Shared Queue (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-writing-an-sqs-policy.html#write-messages-to-shared-queue) -// in the Amazon SQS Developer Guide. -// - AddPermission generates a policy for you. You can use SetQueueAttributes to -// upload your policy. For more information, see Using Custom Policies with the -// Amazon SQS Access Policy Language (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-creating-custom-policies.html) -// in the Amazon SQS Developer Guide. +// Adds a permission to a queue for a specific [principal]. This allows sharing access to the +// queue. +// +// When you create a queue, you have full control access rights for the queue. +// Only you, the owner of the queue, can grant or deny permissions to the queue. +// For more information about these permissions, see [Allow Developers to Write Messages to a Shared Queue]in the Amazon SQS Developer +// Guide. +// +// - AddPermission generates a policy for you. You can use SetQueueAttributesto upload your +// policy. For more information, see [Using Custom Policies with the Amazon SQS Access Policy Language]in the Amazon SQS Developer Guide. +// // - An Amazon SQS policy can have a maximum of seven actions per statement. +// // - To remove the ability to change queue permissions, you must deny permission // to the AddPermission , RemovePermission , and SetQueueAttributes actions in // your IAM policy. +// // - Amazon SQS AddPermission does not support adding a non-account principal. // -// Cross-account permissions don't apply to this action. For more information, see -// Grant cross-account permissions to a role and a username (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) +// Cross-account permissions don't apply to this action. For more information, see [Grant cross-account permissions to a role and a username] // in the Amazon SQS Developer Guide. 
+// +// [principal]: https://docs.aws.amazon.com/general/latest/gr/glos-chap.html#P +// [Allow Developers to Write Messages to a Shared Queue]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-writing-an-sqs-policy.html#write-messages-to-shared-queue +// [Using Custom Policies with the Amazon SQS Access Policy Language]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-creating-custom-policies.html +// [Grant cross-account permissions to a role and a username]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name func (c *Client) AddPermission(ctx context.Context, params *AddPermissionInput, optFns ...func(*Options)) (*AddPermissionOutput, error) { if params == nil { params = &AddPermissionInput{} @@ -46,22 +53,28 @@ func (c *Client) AddPermission(ctx context.Context, params *AddPermissionInput, type AddPermissionInput struct { - // The Amazon Web Services account numbers of the principals (https://docs.aws.amazon.com/general/latest/gr/glos-chap.html#P) - // who are to receive permission. For information about locating the Amazon Web - // Services account identification, see Your Amazon Web Services Identifiers (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-making-api-requests.html#sqs-api-request-authentication) - // in the Amazon SQS Developer Guide. + // The Amazon Web Services account numbers of the [principals] who are to receive permission. + // For information about locating the Amazon Web Services account identification, + // see [Your Amazon Web Services Identifiers]in the Amazon SQS Developer Guide. + // + // [Your Amazon Web Services Identifiers]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-making-api-requests.html#sqs-api-request-authentication + // [principals]: https://docs.aws.amazon.com/general/latest/gr/glos-chap.html#P // // This member is required. AWSAccountIds []string // The action the client wants to allow for the specified principal. Valid values: - // the name of any action or * . For more information about these actions, see - // Overview of Managing Access Permissions to Your Amazon Simple Queue Service - // Resource (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-overview-of-managing-access.html) - // in the Amazon SQS Developer Guide. Specifying SendMessage , DeleteMessage , or - // ChangeMessageVisibility for ActionName.n also grants permissions for the - // corresponding batch versions of those actions: SendMessageBatch , - // DeleteMessageBatch , and ChangeMessageVisibilityBatch . + // the name of any action or * . + // + // For more information about these actions, see [Overview of Managing Access Permissions to Your Amazon Simple Queue Service Resource] in the Amazon SQS Developer + // Guide. + // + // Specifying SendMessage , DeleteMessage , or ChangeMessageVisibility for + // ActionName.n also grants permissions for the corresponding batch versions of + // those actions: SendMessageBatch , DeleteMessageBatch , and + // ChangeMessageVisibilityBatch . + // + // [Overview of Managing Access Permissions to Your Amazon Simple Queue Service Resource]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-overview-of-managing-access.html // // This member is required. Actions []string @@ -73,8 +86,9 @@ type AddPermissionInput struct { // This member is required. 
Label *string - // The URL of the Amazon SQS queue to which permissions are added. Queue URLs and - // names are case-sensitive. + // The URL of the Amazon SQS queue to which permissions are added. + // + // Queue URLs and names are case-sensitive. // // This member is required. QueueUrl *string @@ -144,6 +158,12 @@ func (c *Client) addOperationAddPermissionMiddlewares(stack *middleware.Stack, o if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpAddPermissionValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_CancelMessageMoveTask.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_CancelMessageMoveTask.go index 783b7a8f395..c88b798a5c2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_CancelMessageMoveTask.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_CancelMessageMoveTask.go @@ -14,14 +14,16 @@ import ( // cancelled when the current status is RUNNING. Cancelling a message movement task // does not revert the messages that have already been moved. It can only stop the // messages that have not been moved yet. -// - This action is currently limited to supporting message redrive from -// dead-letter queues (DLQs) (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) -// only. In this context, the source queue is the dead-letter queue (DLQ), while -// the destination queue can be the original source queue (from which the messages -// were driven to the dead-letter-queue), or a custom destination queue. -// - Currently, only standard queues are supported. +// +// - This action is currently limited to supporting message redrive from [dead-letter queues (DLQs)]only. +// In this context, the source queue is the dead-letter queue (DLQ), while the +// destination queue can be the original source queue (from which the messages were +// driven to the dead-letter-queue), or a custom destination queue. +// // - Only one active message movement task is supported per queue at any given // time. +// +// [dead-letter queues (DLQs)]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html func (c *Client) CancelMessageMoveTask(ctx context.Context, params *CancelMessageMoveTaskInput, optFns ...func(*Options)) (*CancelMessageMoveTaskOutput, error) { if params == nil { params = &CancelMessageMoveTaskInput{} @@ -113,6 +115,12 @@ func (c *Client) addOperationCancelMessageMoveTaskMiddlewares(stack *middleware. 
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpCancelMessageMoveTaskValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ChangeMessageVisibility.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ChangeMessageVisibility.go index d7339c67d00..163f0807622 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ChangeMessageVisibility.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ChangeMessageVisibility.go @@ -12,18 +12,23 @@ import ( // Changes the visibility timeout of a specified message in a queue to a new // value. The default visibility timeout for a message is 30 seconds. The minimum -// is 0 seconds. The maximum is 12 hours. For more information, see Visibility -// Timeout (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) -// in the Amazon SQS Developer Guide. For example, if the default timeout for a -// queue is 60 seconds, 15 seconds have elapsed since you received the message, and -// you send a ChangeMessageVisibility call with VisibilityTimeout set to 10 -// seconds, the 10 seconds begin to count from the time that you make the -// ChangeMessageVisibility call. Thus, any attempt to change the visibility timeout -// or to delete that message 10 seconds after you initially change the visibility -// timeout (a total of 25 seconds) might result in an error. An Amazon SQS message -// has three basic states: +// is 0 seconds. The maximum is 12 hours. For more information, see [Visibility Timeout]in the Amazon +// SQS Developer Guide. +// +// For example, if the default timeout for a queue is 60 seconds, 15 seconds have +// elapsed since you received the message, and you send a ChangeMessageVisibility +// call with VisibilityTimeout set to 10 seconds, the 10 seconds begin to count +// from the time that you make the ChangeMessageVisibility call. Thus, any attempt +// to change the visibility timeout or to delete that message 10 seconds after you +// initially change the visibility timeout (a total of 25 seconds) might result in +// an error. +// +// An Amazon SQS message has three basic states: +// // - Sent to a queue by a producer. +// // - Received from the queue by a consumer. +// // - Deleted from the queue. // // A message is considered to be stored after it is sent to a queue by a producer, @@ -31,26 +36,36 @@ import ( // 2). There is no limit to the number of stored messages. A message is considered // to be in flight after it is received from a queue by a consumer, but not yet // deleted from the queue (that is, between states 2 and 3). There is a limit to -// the number of in flight messages. Limits that apply to in flight messages are -// unrelated to the unlimited number of stored messages. For most standard queues -// (depending on queue traffic and message backlog), there can be a maximum of -// approximately 120,000 in flight messages (received from a queue by a consumer, -// but not yet deleted from the queue). If you reach this limit, Amazon SQS returns -// the OverLimit error message. To avoid reaching the limit, you should delete -// messages from the queue after they're processed. You can also increase the -// number of queues you use to process your messages. 
To request a limit increase, -// file a support request (https://console.aws.amazon.com/support/home#/case/create?issueType=service-limit-increase&limitType=service-code-sqs) -// . For FIFO queues, there can be a maximum of 20,000 in flight messages (received +// the number of in flight messages. +// +// Limits that apply to in flight messages are unrelated to the unlimited number +// of stored messages. +// +// For most standard queues (depending on queue traffic and message backlog), +// there can be a maximum of approximately 120,000 in flight messages (received +// from a queue by a consumer, but not yet deleted from the queue). If you reach +// this limit, Amazon SQS returns the OverLimit error message. To avoid reaching +// the limit, you should delete messages from the queue after they're processed. +// You can also increase the number of queues you use to process your messages. To +// request a limit increase, [file a support request]. +// +// For FIFO queues, there can be a maximum of 20,000 in flight messages (received // from a queue by a consumer, but not yet deleted from the queue). If you reach -// this limit, Amazon SQS returns no error messages. If you attempt to set the -// VisibilityTimeout to a value greater than the maximum time left, Amazon SQS -// returns an error. Amazon SQS doesn't automatically recalculate and increase the -// timeout to the maximum remaining time. Unlike with a queue, when you change the -// visibility timeout for a specific message the timeout value is applied -// immediately but isn't saved in memory for that message. If you don't delete a -// message after it is received, the visibility timeout for the message reverts to -// the original timeout value (not to the value you set using the -// ChangeMessageVisibility action) the next time the message is received. +// this limit, Amazon SQS returns no error messages. +// +// If you attempt to set the VisibilityTimeout to a value greater than the maximum +// time left, Amazon SQS returns an error. Amazon SQS doesn't automatically +// recalculate and increase the timeout to the maximum remaining time. +// +// Unlike with a queue, when you change the visibility timeout for a specific +// message the timeout value is applied immediately but isn't saved in memory for +// that message. If you don't delete a message after it is received, the visibility +// timeout for the message reverts to the original timeout value (not to the value +// you set using the ChangeMessageVisibility action) the next time the message is +// received. +// +// [Visibility Timeout]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html +// [file a support request]: https://console.aws.amazon.com/support/home#/case/create?issueType=service-limit-increase&limitType=service-code-sqs func (c *Client) ChangeMessageVisibility(ctx context.Context, params *ChangeMessageVisibilityInput, optFns ...func(*Options)) (*ChangeMessageVisibilityOutput, error) { if params == nil { params = &ChangeMessageVisibilityInput{} @@ -68,14 +83,15 @@ func (c *Client) ChangeMessageVisibility(ctx context.Context, params *ChangeMess type ChangeMessageVisibilityInput struct { - // The URL of the Amazon SQS queue whose message's visibility is changed. Queue - // URLs and names are case-sensitive. + // The URL of the Amazon SQS queue whose message's visibility is changed. + // + // Queue URLs and names are case-sensitive. // // This member is required. 
QueueUrl *string // The receipt handle associated with the message, whose visibility timeout is - // changed. This parameter is returned by the ReceiveMessage action. + // changed. This parameter is returned by the ReceiveMessageaction. // // This member is required. ReceiptHandle *string @@ -151,6 +167,12 @@ func (c *Client) addOperationChangeMessageVisibilityMiddlewares(stack *middlewar if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpChangeMessageVisibilityValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ChangeMessageVisibilityBatch.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ChangeMessageVisibilityBatch.go index ac359caf156..2b031c90585 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ChangeMessageVisibilityBatch.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ChangeMessageVisibilityBatch.go @@ -11,13 +11,14 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Changes the visibility timeout of multiple messages. This is a batch version of -// ChangeMessageVisibility . The result of the action on each message is reported -// individually in the response. You can send up to 10 ChangeMessageVisibility -// requests with each ChangeMessageVisibilityBatch action. Because the batch -// request can result in a combination of successful and unsuccessful actions, you -// should check for batch errors even when the call returns an HTTP status code of -// 200 . +// Changes the visibility timeout of multiple messages. This is a batch version of ChangeMessageVisibility +// . The result of the action on each message is reported individually in the +// response. You can send up to 10 ChangeMessageVisibilityrequests with each ChangeMessageVisibilityBatch +// action. +// +// Because the batch request can result in a combination of successful and +// unsuccessful actions, you should check for batch errors even when the call +// returns an HTTP status code of 200 . func (c *Client) ChangeMessageVisibilityBatch(ctx context.Context, params *ChangeMessageVisibilityBatchInput, optFns ...func(*Options)) (*ChangeMessageVisibilityBatchOutput, error) { if params == nil { params = &ChangeMessageVisibilityBatchInput{} @@ -41,8 +42,9 @@ type ChangeMessageVisibilityBatchInput struct { // This member is required. Entries []types.ChangeMessageVisibilityBatchRequestEntry - // The URL of the Amazon SQS queue whose messages' visibility is changed. Queue - // URLs and names are case-sensitive. + // The URL of the Amazon SQS queue whose messages' visibility is changed. + // + // Queue URLs and names are case-sensitive. // // This member is required. QueueUrl *string @@ -50,9 +52,8 @@ type ChangeMessageVisibilityBatchInput struct { noSmithyDocumentSerde } -// For each message in the batch, the response contains a -// ChangeMessageVisibilityBatchResultEntry tag if the message succeeds or a -// BatchResultErrorEntry tag if the message fails. +// For each message in the batch, the response contains a ChangeMessageVisibilityBatchResultEntry tag if the message +// succeeds or a BatchResultErrorEntrytag if the message fails. type ChangeMessageVisibilityBatchOutput struct { // A list of BatchResultErrorEntry items. 
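The ChangeMessageVisibility documentation reflowed above stresses that the new timeout counts from the moment of the call, not from the original receive. A minimal usage sketch against this vendored SQS client follows; the queue URL and 60-second timeout are illustrative, and VisibilityTimeout is assumed to be a plain int32 on the input struct, which is not shown in these hunks.

// Sketch only: receive one message, then extend its visibility timeout while
// it is still being processed, using the input fields documented above.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sqs"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := sqs.NewFromConfig(cfg)
	queueURL := aws.String("https://sqs.us-east-1.amazonaws.com/123456789012/my-queue") // placeholder

	out, err := client.ReceiveMessage(context.TODO(), &sqs.ReceiveMessageInput{
		QueueUrl:            queueURL,
		MaxNumberOfMessages: 1,
	})
	if err != nil {
		log.Fatal(err)
	}
	if len(out.Messages) == 0 {
		log.Fatal("no messages available")
	}

	// Give the in-flight message another 60 seconds before it becomes visible
	// again; the clock starts at this call, per the documentation above.
	_, err = client.ChangeMessageVisibility(context.TODO(), &sqs.ChangeMessageVisibilityInput{
		QueueUrl:          queueURL,
		ReceiptHandle:     out.Messages[0].ReceiptHandle,
		VisibilityTimeout: 60,
	})
	if err != nil {
		log.Fatal(err)
	}
}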
@@ -126,6 +127,12 @@ func (c *Client) addOperationChangeMessageVisibilityBatchMiddlewares(stack *midd if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpChangeMessageVisibilityBatchValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_CreateQueue.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_CreateQueue.go index 35e691ff5b3..4d8d1c75af6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_CreateQueue.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_CreateQueue.go @@ -12,33 +12,46 @@ import ( // Creates a new standard or FIFO queue. You can pass one or more attributes in // the request. Keep the following in mind: +// // - If you don't specify the FifoQueue attribute, Amazon SQS creates a standard -// queue. You can't change the queue type after you create it and you can't convert -// an existing standard queue into a FIFO queue. You must either create a new FIFO -// queue for your application or delete your existing standard queue and recreate -// it as a FIFO queue. For more information, see Moving From a Standard Queue to -// a FIFO Queue (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-moving) -// in the Amazon SQS Developer Guide. -// - If you don't provide a value for an attribute, the queue is created with -// the default value for the attribute. -// - If you delete a queue, you must wait at least 60 seconds before creating a -// queue with the same name. +// queue. +// +// You can't change the queue type after you create it and you can't convert an +// +// existing standard queue into a FIFO queue. You must either create a new FIFO +// queue for your application or delete your existing standard queue and recreate +// it as a FIFO queue. For more information, see [Moving From a Standard Queue to a FIFO Queue]in the Amazon SQS Developer +// Guide. +// +// - If you don't provide a value for an attribute, the queue is created with +// the default value for the attribute. +// +// - If you delete a queue, you must wait at least 60 seconds before creating a +// queue with the same name. // // To successfully create a new queue, you must provide a queue name that adheres -// to the limits related to queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-queues.html) -// and is unique within the scope of your queues. After you create a queue, you -// must wait at least one second after the queue is created to be able to use the -// queue. To get the queue URL, use the GetQueueUrl action. GetQueueUrl requires -// only the QueueName parameter. be aware of existing queue names: +// to the [limits related to queues]and is unique within the scope of your queues. +// +// After you create a queue, you must wait at least one second after the queue is +// created to be able to use the queue. +// +// To get the queue URL, use the GetQueueUrl action. GetQueueUrl requires only the QueueName parameter. +// be aware of existing queue names: +// // - If you provide the name of an existing queue along with the exact names and // values of all the queue's attributes, CreateQueue returns the queue URL for // the existing queue. 
+// // - If the queue name, attribute names, or attribute values don't match an // existing queue, CreateQueue returns an error. // -// Cross-account permissions don't apply to this action. For more information, see -// Grant cross-account permissions to a role and a username (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) +// Cross-account permissions don't apply to this action. For more information, see [Grant cross-account permissions to a role and a username] // in the Amazon SQS Developer Guide. +// +// [limits related to queues]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-queues.html +// [Grant cross-account permissions to a role and a username]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name +// +// [Moving From a Standard Queue to a FIFO Queue]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-moving func (c *Client) CreateQueue(ctx context.Context, params *CreateQueueInput, optFns ...func(*Options)) (*CreateQueueOutput, error) { if params == nil { params = &CreateQueueInput{} @@ -57,23 +70,31 @@ func (c *Client) CreateQueue(ctx context.Context, params *CreateQueueInput, optF type CreateQueueInput struct { // The name of the new queue. The following limits apply to this name: + // // - A queue name can have up to 80 characters. + // // - Valid values: alphanumeric characters, hyphens ( - ), and underscores ( _ ). + // // - A FIFO queue name must end with the .fifo suffix. + // // Queue URLs and names are case-sensitive. // // This member is required. QueueName *string - // A map of attributes with their corresponding values. The following lists the - // names, descriptions, and values of the special request parameters that the - // CreateQueue action uses: + // A map of attributes with their corresponding values. + // + // The following lists the names, descriptions, and values of the special request + // parameters that the CreateQueue action uses: + // // - DelaySeconds – The length of time, in seconds, for which the delivery of all // messages in the queue is delayed. Valid values: An integer from 0 to 900 seconds // (15 minutes). Default: 0. + // // - MaximumMessageSize – The limit of how many bytes a message can contain // before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes (1 KiB) // to 262,144 bytes (256 KiB). Default: 262,144 (256 KiB). + // // - MessageRetentionPeriod – The length of time, in seconds, for which Amazon // SQS retains a message. Valid values: An integer from 60 seconds (1 minute) to // 1,209,600 seconds (14 days). Default: 345,600 (4 days). When you change a @@ -83,129 +104,181 @@ type CreateQueueInput struct { // existing messages in the queue potentially causing them to be expired and // deleted if the MessageRetentionPeriod is reduced below the age of existing // messages. + // // - Policy – The queue's policy. A valid Amazon Web Services policy. For more - // information about policy structure, see Overview of Amazon Web Services IAM - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/PoliciesOverview.html) - // in the IAM User Guide. - // - ReceiveMessageWaitTimeSeconds – The length of time, in seconds, for which a - // ReceiveMessage action waits for a message to arrive. 
Valid values: An integer - // from 0 to 20 (seconds). Default: 0. + // information about policy structure, see [Overview of Amazon Web Services IAM Policies]in the IAM User Guide. + // + // - ReceiveMessageWaitTimeSeconds – The length of time, in seconds, for which a ReceiveMessage + // action waits for a message to arrive. Valid values: An integer from 0 to 20 + // (seconds). Default: 0. + // // - VisibilityTimeout – The visibility timeout for the queue, in seconds. Valid // values: An integer from 0 to 43,200 (12 hours). Default: 30. For more - // information about the visibility timeout, see Visibility Timeout (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) - // in the Amazon SQS Developer Guide. - // The following attributes apply only to dead-letter queues: (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) + // information about the visibility timeout, see [Visibility Timeout]in the Amazon SQS Developer + // Guide. + // + // The following attributes apply only to [dead-letter queues:] + // // - RedrivePolicy – The string that includes the parameters for the dead-letter // queue functionality of the source queue as a JSON object. The parameters are as // follows: + // // - deadLetterTargetArn – The Amazon Resource Name (ARN) of the dead-letter // queue to which Amazon SQS moves messages after the value of maxReceiveCount is // exceeded. + // // - maxReceiveCount – The number of times a message is delivered to the source // queue before being moved to the dead-letter queue. Default: 10. When the // ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon SQS // moves the message to the dead-letter-queue. + // // - RedriveAllowPolicy – The string that includes the parameters for the // permissions for the dead-letter queue redrive permission and which source queues // can specify dead-letter queues as a JSON object. The parameters are as follows: + // // - redrivePermission – The permission type that defines which source queues can // specify the current queue as the dead-letter queue. Valid values are: + // // - allowAll – (Default) Any source queues in this Amazon Web Services account // in the same Region can specify this queue as the dead-letter queue. + // // - denyAll – No source queues can specify this queue as the dead-letter queue. + // // - byQueue – Only queues specified by the sourceQueueArns parameter can specify // this queue as the dead-letter queue. + // // - sourceQueueArns – The Amazon Resource Names (ARN)s of the source queues that // can specify this queue as the dead-letter queue and redrive messages. You can // specify this parameter only when the redrivePermission parameter is set to // byQueue . You can specify up to 10 source queue ARNs. To allow more than 10 // source queues to specify dead-letter queues, set the redrivePermission // parameter to allowAll . + // // The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the - // dead-letter queue of a standard queue must also be a standard queue. The - // following attributes apply only to server-side-encryption (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html) - // : + // dead-letter queue of a standard queue must also be a standard queue. 
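A rough illustration of the RedrivePolicy dead-letter-queue attributes listed above. The queue name and DLQ ARN are hypothetical; the JSON keys mirror the deadLetterTargetArn and maxReceiveCount parameters described in the doc comment.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sqs"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := sqs.NewFromConfig(cfg)

	// Hypothetical dead-letter queue ARN; in practice the DLQ is created first and its
	// QueueArn attribute is read back.
	dlqArn := "arn:aws:sqs:us-east-1:123456789012:example-dlq"

	out, err := client.CreateQueue(ctx, &sqs.CreateQueueInput{
		QueueName: aws.String("example-source-queue"),
		Attributes: map[string]string{
			// Move a message to the DLQ after it has been received 5 times.
			"RedrivePolicy":     fmt.Sprintf(`{"deadLetterTargetArn":%q,"maxReceiveCount":"5"}`, dlqArn),
			"VisibilityTimeout": "30",
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created:", aws.ToString(out.QueueUrl))
}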
+ // + // The following attributes apply only to [server-side-encryption]: + // // - KmsMasterKeyId – The ID of an Amazon Web Services managed customer master - // key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms) - // . While the alias of the Amazon Web Services managed CMK for Amazon SQS is - // always alias/aws/sqs , the alias of a custom CMK can, for example, be - // alias/MyAlias . For more examples, see KeyId (https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) - // in the Key Management Service API Reference. + // key (CMK) for Amazon SQS or a custom CMK. For more information, see [Key Terms]. While + // the alias of the Amazon Web Services managed CMK for Amazon SQS is always + // alias/aws/sqs , the alias of a custom CMK can, for example, be alias/MyAlias + // . For more examples, see [KeyId]in the Key Management Service API Reference. + // // - KmsDataKeyReusePeriodSeconds – The length of time, in seconds, for which - // Amazon SQS can reuse a data key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#data-keys) - // to encrypt or decrypt messages before calling KMS again. An integer representing - // seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). Default: - // 300 (5 minutes). A shorter time period provides better security but results in - // more calls to KMS which might incur charges after Free Tier. For more - // information, see How Does the Data Key Reuse Period Work? (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-how-does-the-data-key-reuse-period-work) + // Amazon SQS can reuse a [data key]to encrypt or decrypt messages before calling KMS + // again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 + // seconds (24 hours). Default: 300 (5 minutes). A shorter time period provides + // better security but results in more calls to KMS which might incur charges after + // Free Tier. For more information, see [How Does the Data Key Reuse Period Work?] + // // - SqsManagedSseEnabled – Enables server-side queue encryption using SQS owned // encryption keys. Only one server-side encryption option is supported per queue - // (for example, SSE-KMS (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-configure-sse-existing-queue.html) - // or SSE-SQS (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-configure-sqs-sse-queue.html) - // ). - // The following attributes apply only to FIFO (first-in-first-out) queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html) - // : + // (for example, [SSE-KMS]or [SSE-SQS]). + // + // The following attributes apply only to [FIFO (first-in-first-out) queues]: + // // - FifoQueue – Designates a queue as FIFO. Valid values are true and false . If // you don't specify the FifoQueue attribute, Amazon SQS creates a standard // queue. You can provide this attribute only during queue creation. You can't // change it for an existing queue. When you set this attribute, you must also - // provide the MessageGroupId for your messages explicitly. 
For more information, - // see FIFO queue logic (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues-understanding-logic.html) - // in the Amazon SQS Developer Guide. + // provide the MessageGroupId for your messages explicitly. + // + // For more information, see [FIFO queue logic]in the Amazon SQS Developer Guide. + // // - ContentBasedDeduplication – Enables content-based deduplication. Valid - // values are true and false . For more information, see Exactly-once processing (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues-exactly-once-processing.html) - // in the Amazon SQS Developer Guide. Note the following: + // values are true and false . For more information, see [Exactly-once processing]in the Amazon SQS + // Developer Guide. Note the following: + // // - Every message must have a unique MessageDeduplicationId . + // // - You may provide a MessageDeduplicationId explicitly. + // // - If you aren't able to provide a MessageDeduplicationId and you enable // ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to // generate the MessageDeduplicationId using the body of the message (but not the // attributes of the message). + // // - If you don't provide a MessageDeduplicationId and the queue doesn't have // ContentBasedDeduplication set, the action fails with an error. + // // - If the queue has ContentBasedDeduplication set, your MessageDeduplicationId // overrides the generated one. + // // - When ContentBasedDeduplication is in effect, messages with identical content // sent within the deduplication interval are treated as duplicates and only one // copy of the message is delivered. + // // - If you send one message with ContentBasedDeduplication enabled and then // another message with a MessageDeduplicationId that is the same as the one // generated for the first MessageDeduplicationId , the two messages are treated // as duplicates and only one copy of the message is delivered. - // The following attributes apply only to high throughput for FIFO queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/high-throughput-fifo.html) - // : + // + // The following attributes apply only to [high throughput for FIFO queues]: + // // - DeduplicationScope – Specifies whether message deduplication occurs at the // message group or queue level. Valid values are messageGroup and queue . + // // - FifoThroughputLimit – Specifies whether the FIFO queue throughput quota // applies to the entire queue or per message group. Valid values are perQueue // and perMessageGroupId . The perMessageGroupId value is allowed only when the // value for DeduplicationScope is messageGroup . + // // To enable high throughput for FIFO queues, do the following: + // // - Set DeduplicationScope to messageGroup . + // // - Set FifoThroughputLimit to perMessageGroupId . + // // If you set these attributes to anything other than the values shown for // enabling high throughput, normal throughput is in effect and deduplication - // occurs as specified. For information on throughput quotas, see Quotas related - // to messages (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/quotas-messages.html) - // in the Amazon SQS Developer Guide. + // occurs as specified. + // + // For information on throughput quotas, see [Quotas related to messages] in the Amazon SQS Developer Guide. 
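The high-throughput FIFO combination spelled out above (DeduplicationScope set to messageGroup plus FifoThroughputLimit set to perMessageGroupId) looks roughly like this at queue-creation time; the queue name and the remaining attribute values are illustrative only.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sqs"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := sqs.NewFromConfig(cfg)

	out, err := client.CreateQueue(ctx, &sqs.CreateQueueInput{
		// FIFO queue names must end with the .fifo suffix.
		QueueName: aws.String("example-orders.fifo"),
		Attributes: map[string]string{
			"FifoQueue":                 "true",
			"ContentBasedDeduplication": "true",
			// The two settings that enable high throughput per the doc comment above.
			"DeduplicationScope":  "messageGroup",
			"FifoThroughputLimit": "perMessageGroupId",
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created FIFO queue:", aws.ToString(out.QueueUrl))
}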
+ // + // [SSE-KMS]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-configure-sse-existing-queue.html + // [data key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#data-keys + // [How Does the Data Key Reuse Period Work?]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-how-does-the-data-key-reuse-period-work + // [SSE-SQS]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-configure-sqs-sse-queue.html + // [high throughput for FIFO queues]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/high-throughput-fifo.html + // [Overview of Amazon Web Services IAM Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/PoliciesOverview.html + // [dead-letter queues:]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html + // [Exactly-once processing]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues-exactly-once-processing.html + // [KeyId]: https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters + // [Quotas related to messages]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/quotas-messages.html + // [Visibility Timeout]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html + // [Key Terms]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms + // [server-side-encryption]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html + // [FIFO queue logic]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues-understanding-logic.html + // [FIFO (first-in-first-out) queues]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html Attributes map[string]string // Add cost allocation tags to the specified Amazon SQS queue. For an overview, - // see Tagging Your Amazon SQS Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-tags.html) - // in the Amazon SQS Developer Guide. When you use queue tags, keep the following - // guidelines in mind: + // see [Tagging Your Amazon SQS Queues]in the Amazon SQS Developer Guide. + // + // When you use queue tags, keep the following guidelines in mind: + // // - Adding more than 50 tags to a queue isn't recommended. + // // - Tags don't have any semantic meaning. Amazon SQS interprets tags as // character strings. + // // - Tags are case-sensitive. + // // - A new tag with a key identical to that of an existing tag overwrites the // existing tag. - // For a full list of tag restrictions, see Quotas related to queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-limits.html#limits-queues) - // in the Amazon SQS Developer Guide. To be able to tag a queue on creation, you - // must have the sqs:CreateQueue and sqs:TagQueue permissions. Cross-account - // permissions don't apply to this action. 
For more information, see Grant - // cross-account permissions to a role and a username (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) + // + // For a full list of tag restrictions, see [Quotas related to queues] in the Amazon SQS Developer Guide. + // + // To be able to tag a queue on creation, you must have the sqs:CreateQueue and + // sqs:TagQueue permissions. + // + // Cross-account permissions don't apply to this action. For more information, see [Grant cross-account permissions to a role and a username] // in the Amazon SQS Developer Guide. + // + // [Tagging Your Amazon SQS Queues]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-tags.html + // [Quotas related to queues]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-limits.html#limits-queues + // [Grant cross-account permissions to a role and a username]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name Tags map[string]string noSmithyDocumentSerde @@ -278,6 +351,12 @@ func (c *Client) addOperationCreateQueueMiddlewares(stack *middleware.Stack, opt if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpCreateQueueValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_DeleteMessage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_DeleteMessage.go index 2e3011be79d..c365e5a30f2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_DeleteMessage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_DeleteMessage.go @@ -15,18 +15,20 @@ import ( // receive when you send the message). Amazon SQS can delete a message from a queue // even if a visibility timeout setting causes the message to be locked by another // consumer. Amazon SQS automatically deletes messages left in a queue longer than -// the retention period configured for the queue. The ReceiptHandle is associated -// with a specific instance of receiving a message. If you receive a message more -// than once, the ReceiptHandle is different each time you receive a message. When -// you use the DeleteMessage action, you must provide the most recently received -// ReceiptHandle for the message (otherwise, the request succeeds, but the message -// will not be deleted). For standard queues, it is possible to receive a message -// even after you delete it. This might happen on rare occasions if one of the -// servers which stores a copy of the message is unavailable when you send the -// request to delete the message. The copy remains on the server and might be -// returned to you during a subsequent receive request. You should ensure that your -// application is idempotent, so that receiving a message more than once does not -// cause issues. +// the retention period configured for the queue. +// +// The ReceiptHandle is associated with a specific instance of receiving a +// message. If you receive a message more than once, the ReceiptHandle is +// different each time you receive a message. 
When you use the DeleteMessage +// action, you must provide the most recently received ReceiptHandle for the +// message (otherwise, the request succeeds, but the message will not be deleted). +// +// For standard queues, it is possible to receive a message even after you delete +// it. This might happen on rare occasions if one of the servers which stores a +// copy of the message is unavailable when you send the request to delete the +// message. The copy remains on the server and might be returned to you during a +// subsequent receive request. You should ensure that your application is +// idempotent, so that receiving a message more than once does not cause issues. func (c *Client) DeleteMessage(ctx context.Context, params *DeleteMessageInput, optFns ...func(*Options)) (*DeleteMessageOutput, error) { if params == nil { params = &DeleteMessageInput{} @@ -44,8 +46,9 @@ func (c *Client) DeleteMessage(ctx context.Context, params *DeleteMessageInput, type DeleteMessageInput struct { - // The URL of the Amazon SQS queue from which messages are deleted. Queue URLs and - // names are case-sensitive. + // The URL of the Amazon SQS queue from which messages are deleted. + // + // Queue URLs and names are case-sensitive. // // This member is required. QueueUrl *string @@ -120,6 +123,12 @@ func (c *Client) addOperationDeleteMessageMiddlewares(stack *middleware.Stack, o if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDeleteMessageValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_DeleteMessageBatch.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_DeleteMessageBatch.go index 36834805d0a..8b35d180b34 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_DeleteMessageBatch.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_DeleteMessageBatch.go @@ -11,11 +11,13 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes up to ten messages from the specified queue. This is a batch version of -// DeleteMessage . The result of the action on each message is reported -// individually in the response. Because the batch request can result in a -// combination of successful and unsuccessful actions, you should check for batch -// errors even when the call returns an HTTP status code of 200 . +// Deletes up to ten messages from the specified queue. This is a batch version of DeleteMessage +// . The result of the action on each message is reported individually in the +// response. +// +// Because the batch request can result in a combination of successful and +// unsuccessful actions, you should check for batch errors even when the call +// returns an HTTP status code of 200 . func (c *Client) DeleteMessageBatch(ctx context.Context, params *DeleteMessageBatchInput, optFns ...func(*Options)) (*DeleteMessageBatchOutput, error) { if params == nil { params = &DeleteMessageBatchInput{} @@ -38,8 +40,9 @@ type DeleteMessageBatchInput struct { // This member is required. Entries []types.DeleteMessageBatchRequestEntry - // The URL of the Amazon SQS queue from which messages are deleted. Queue URLs and - // names are case-sensitive. + // The URL of the Amazon SQS queue from which messages are deleted. + // + // Queue URLs and names are case-sensitive. // // This member is required. 
QueueUrl *string @@ -47,9 +50,8 @@ type DeleteMessageBatchInput struct { noSmithyDocumentSerde } -// For each message in the batch, the response contains a -// DeleteMessageBatchResultEntry tag if the message is deleted or a -// BatchResultErrorEntry tag if the message can't be deleted. +// For each message in the batch, the response contains a DeleteMessageBatchResultEntry tag if the message is +// deleted or a BatchResultErrorEntrytag if the message can't be deleted. type DeleteMessageBatchOutput struct { // A list of BatchResultErrorEntry items. @@ -123,6 +125,12 @@ func (c *Client) addOperationDeleteMessageBatchMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDeleteMessageBatchValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_DeleteQueue.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_DeleteQueue.go index 2fa662b83f0..c25d7664d30 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_DeleteQueue.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_DeleteQueue.go @@ -10,17 +10,25 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes the queue specified by the QueueUrl , regardless of the queue's -// contents. Be careful with the DeleteQueue action: When you delete a queue, any -// messages in the queue are no longer available. When you delete a queue, the -// deletion process takes up to 60 seconds. Requests you send involving that queue -// during the 60 seconds might succeed. For example, a SendMessage request might -// succeed, but after 60 seconds the queue and the message you sent no longer -// exist. When you delete a queue, you must wait at least 60 seconds before -// creating a queue with the same name. Cross-account permissions don't apply to -// this action. For more information, see Grant cross-account permissions to a -// role and a username (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) -// in the Amazon SQS Developer Guide. The delete operation uses the HTTP GET verb. +// Deletes the queue specified by the QueueUrl , regardless of the queue's contents. +// +// Be careful with the DeleteQueue action: When you delete a queue, any messages +// in the queue are no longer available. +// +// When you delete a queue, the deletion process takes up to 60 seconds. Requests +// you send involving that queue during the 60 seconds might succeed. For example, +// a SendMessagerequest might succeed, but after 60 seconds the queue and the message you +// sent no longer exist. +// +// When you delete a queue, you must wait at least 60 seconds before creating a +// queue with the same name. +// +// Cross-account permissions don't apply to this action. For more information, see [Grant cross-account permissions to a role and a username] +// in the Amazon SQS Developer Guide. +// +// The delete operation uses the HTTP GET verb. 
+// +// [Grant cross-account permissions to a role and a username]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name func (c *Client) DeleteQueue(ctx context.Context, params *DeleteQueueInput, optFns ...func(*Options)) (*DeleteQueueOutput, error) { if params == nil { params = &DeleteQueueInput{} @@ -38,8 +46,9 @@ func (c *Client) DeleteQueue(ctx context.Context, params *DeleteQueueInput, optF type DeleteQueueInput struct { - // The URL of the Amazon SQS queue to delete. Queue URLs and names are - // case-sensitive. + // The URL of the Amazon SQS queue to delete. + // + // Queue URLs and names are case-sensitive. // // This member is required. QueueUrl *string @@ -109,6 +118,12 @@ func (c *Client) addOperationDeleteQueueMiddlewares(stack *middleware.Stack, opt if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDeleteQueueValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_GetQueueAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_GetQueueAttributes.go index 6f90d7352f5..5b64a2bd1b8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_GetQueueAttributes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_GetQueueAttributes.go @@ -11,8 +11,12 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Gets attributes for the specified queue. To determine whether a queue is FIFO (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html) -// , you can check whether QueueName ends with the .fifo suffix. +// Gets attributes for the specified queue. +// +// To determine whether a queue is [FIFO], you can check whether QueueName ends with the +// .fifo suffix. +// +// [FIFO]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html func (c *Client) GetQueueAttributes(ctx context.Context, params *GetQueueAttributesInput, optFns ...func(*Options)) (*GetQueueAttributesOutput, error) { if params == nil { params = &GetQueueAttributesInput{} @@ -30,40 +34,55 @@ func (c *Client) GetQueueAttributes(ctx context.Context, params *GetQueueAttribu type GetQueueAttributesInput struct { - // The URL of the Amazon SQS queue whose attribute information is retrieved. Queue - // URLs and names are case-sensitive. + // The URL of the Amazon SQS queue whose attribute information is retrieved. + // + // Queue URLs and names are case-sensitive. // // This member is required. QueueUrl *string - // A list of attributes for which to retrieve information. The AttributeNames - // parameter is optional, but if you don't specify values for this parameter, the - // request returns empty results. In the future, new attributes might be added. If - // you write code that calls this action, we recommend that you structure your code - // so that it can handle new attributes gracefully. The following attributes are - // supported: The ApproximateNumberOfMessagesDelayed , - // ApproximateNumberOfMessagesNotVisible , and ApproximateNumberOfMessages metrics - // may not achieve consistency until at least 1 minute after the producers stop - // sending messages. 
This period is required for the queue metadata to reach - // eventual consistency. + // A list of attributes for which to retrieve information. + // + // The AttributeNames parameter is optional, but if you don't specify values for + // this parameter, the request returns empty results. + // + // In the future, new attributes might be added. If you write code that calls this + // action, we recommend that you structure your code so that it can handle new + // attributes gracefully. + // + // The following attributes are supported: + // + // The ApproximateNumberOfMessagesDelayed , ApproximateNumberOfMessagesNotVisible , + // and ApproximateNumberOfMessages metrics may not achieve consistency until at + // least 1 minute after the producers stop sending messages. This period is + // required for the queue metadata to reach eventual consistency. + // // - All – Returns all values. + // // - ApproximateNumberOfMessages – Returns the approximate number of messages // available for retrieval from the queue. + // // - ApproximateNumberOfMessagesDelayed – Returns the approximate number of // messages in the queue that are delayed and not available for reading // immediately. This can happen when the queue is configured as a delay queue or // when a message has been sent with a delay parameter. + // // - ApproximateNumberOfMessagesNotVisible – Returns the approximate number of // messages that are in flight. Messages are considered to be in flight if they // have been sent to a client but have not yet been deleted or have not yet reached // the end of their visibility window. - // - CreatedTimestamp – Returns the time when the queue was created in seconds ( - // epoch time (http://en.wikipedia.org/wiki/Unix_time) ). + // + // - CreatedTimestamp – Returns the time when the queue was created in seconds ([epoch time] + // ). + // // - DelaySeconds – Returns the default delay on the queue in seconds. + // // - LastModifiedTimestamp – Returns the time when the queue was last changed in - // seconds ( epoch time (http://en.wikipedia.org/wiki/Unix_time) ). + // seconds ([epoch time] ). + // // - MaximumMessageSize – Returns the limit of how many bytes a message can // contain before Amazon SQS rejects it. + // // - MessageRetentionPeriod – Returns the length of time, in seconds, for which // Amazon SQS retains a message. When you change a queue's attributes, the change // can take up to 60 seconds for most of the attributes to propagate throughout the @@ -71,83 +90,119 @@ type GetQueueAttributesInput struct { // take up to 15 minutes and will impact existing messages in the queue potentially // causing them to be expired and deleted if the MessageRetentionPeriod is // reduced below the age of existing messages. + // // - Policy – Returns the policy of the queue. + // // - QueueArn – Returns the Amazon resource name (ARN) of the queue. + // // - ReceiveMessageWaitTimeSeconds – Returns the length of time, in seconds, for // which the ReceiveMessage action waits for a message to arrive. + // // - VisibilityTimeout – Returns the visibility timeout for the queue. For more - // information about the visibility timeout, see Visibility Timeout (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) - // in the Amazon SQS Developer Guide. 
- // The following attributes apply only to dead-letter queues: (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) + // information about the visibility timeout, see [Visibility Timeout]in the Amazon SQS Developer + // Guide. + // + // The following attributes apply only to [dead-letter queues:] + // // - RedrivePolicy – The string that includes the parameters for the dead-letter // queue functionality of the source queue as a JSON object. The parameters are as // follows: + // // - deadLetterTargetArn – The Amazon Resource Name (ARN) of the dead-letter // queue to which Amazon SQS moves messages after the value of maxReceiveCount is // exceeded. + // // - maxReceiveCount – The number of times a message is delivered to the source // queue before being moved to the dead-letter queue. Default: 10. When the // ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon SQS // moves the message to the dead-letter-queue. + // // - RedriveAllowPolicy – The string that includes the parameters for the // permissions for the dead-letter queue redrive permission and which source queues // can specify dead-letter queues as a JSON object. The parameters are as follows: + // // - redrivePermission – The permission type that defines which source queues can // specify the current queue as the dead-letter queue. Valid values are: + // // - allowAll – (Default) Any source queues in this Amazon Web Services account // in the same Region can specify this queue as the dead-letter queue. + // // - denyAll – No source queues can specify this queue as the dead-letter queue. + // // - byQueue – Only queues specified by the sourceQueueArns parameter can specify // this queue as the dead-letter queue. + // // - sourceQueueArns – The Amazon Resource Names (ARN)s of the source queues that // can specify this queue as the dead-letter queue and redrive messages. You can // specify this parameter only when the redrivePermission parameter is set to // byQueue . You can specify up to 10 source queue ARNs. To allow more than 10 // source queues to specify dead-letter queues, set the redrivePermission // parameter to allowAll . + // // The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the - // dead-letter queue of a standard queue must also be a standard queue. The - // following attributes apply only to server-side-encryption (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html) - // : + // dead-letter queue of a standard queue must also be a standard queue. + // + // The following attributes apply only to [server-side-encryption]: + // // - KmsMasterKeyId – Returns the ID of an Amazon Web Services managed customer - // master key (CMK) for Amazon SQS or a custom CMK. For more information, see - // Key Terms (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms) - // . + // master key (CMK) for Amazon SQS or a custom CMK. For more information, see [Key Terms]. + // // - KmsDataKeyReusePeriodSeconds – Returns the length of time, in seconds, for // which Amazon SQS can reuse a data key to encrypt or decrypt messages before - // calling KMS again. For more information, see How Does the Data Key Reuse - // Period Work? (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-how-does-the-data-key-reuse-period-work) - // . + // calling KMS again. 
For more information, see [How Does the Data Key Reuse Period Work?]. + // // - SqsManagedSseEnabled – Returns information about whether the queue is using // SSE-SQS encryption using SQS owned encryption keys. Only one server-side - // encryption option is supported per queue (for example, SSE-KMS (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-configure-sse-existing-queue.html) - // or SSE-SQS (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-configure-sqs-sse-queue.html) - // ). - // The following attributes apply only to FIFO (first-in-first-out) queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html) - // : + // encryption option is supported per queue (for example, [SSE-KMS]or [SSE-SQS]). + // + // The following attributes apply only to [FIFO (first-in-first-out) queues]: + // // - FifoQueue – Returns information about whether the queue is FIFO. For more - // information, see FIFO queue logic (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues-understanding-logic.html) - // in the Amazon SQS Developer Guide. To determine whether a queue is FIFO (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html) - // , you can check whether QueueName ends with the .fifo suffix. + // information, see [FIFO queue logic]in the Amazon SQS Developer Guide. + // + // To determine whether a queue is [FIFO], you can check whether QueueName ends with the + // .fifo suffix. + // // - ContentBasedDeduplication – Returns whether content-based deduplication is - // enabled for the queue. For more information, see Exactly-once processing (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues-exactly-once-processing.html) - // in the Amazon SQS Developer Guide. - // The following attributes apply only to high throughput for FIFO queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/high-throughput-fifo.html) - // : + // enabled for the queue. For more information, see [Exactly-once processing]in the Amazon SQS Developer + // Guide. + // + // The following attributes apply only to [high throughput for FIFO queues]: + // // - DeduplicationScope – Specifies whether message deduplication occurs at the // message group or queue level. Valid values are messageGroup and queue . + // // - FifoThroughputLimit – Specifies whether the FIFO queue throughput quota // applies to the entire queue or per message group. Valid values are perQueue // and perMessageGroupId . The perMessageGroupId value is allowed only when the // value for DeduplicationScope is messageGroup . + // // To enable high throughput for FIFO queues, do the following: + // // - Set DeduplicationScope to messageGroup . + // // - Set FifoThroughputLimit to perMessageGroupId . + // // If you set these attributes to anything other than the values shown for // enabling high throughput, normal throughput is in effect and deduplication - // occurs as specified. For information on throughput quotas, see Quotas related - // to messages (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/quotas-messages.html) - // in the Amazon SQS Developer Guide. + // occurs as specified. + // + // For information on throughput quotas, see [Quotas related to messages] in the Amazon SQS Developer Guide. 
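A minimal sketch of reading the approximate-count attributes enumerated above, assuming an existing queue URL (the placeholder below). The eventual-consistency caveat in the doc comment applies when interpreting the returned numbers.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sqs"
	"github.com/aws/aws-sdk-go-v2/service/sqs/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := sqs.NewFromConfig(cfg)

	queueURL := "https://sqs.us-east-1.amazonaws.com/123456789012/example-queue" // placeholder

	out, err := client.GetQueueAttributes(ctx, &sqs.GetQueueAttributesInput{
		QueueUrl: aws.String(queueURL),
		AttributeNames: []types.QueueAttributeName{
			types.QueueAttributeNameApproximateNumberOfMessages,
			types.QueueAttributeNameApproximateNumberOfMessagesNotVisible,
			types.QueueAttributeNameApproximateNumberOfMessagesDelayed,
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Values come back as strings keyed by attribute name; they are approximate and
	// only eventually consistent, as the doc comment notes.
	for name, value := range out.Attributes {
		fmt.Printf("%s = %s\n", name, value)
	}
}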
+ // + // [SSE-KMS]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-configure-sse-existing-queue.html + // [epoch time]: http://en.wikipedia.org/wiki/Unix_time + // [How Does the Data Key Reuse Period Work?]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-how-does-the-data-key-reuse-period-work + // [SSE-SQS]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-configure-sqs-sse-queue.html + // [high throughput for FIFO queues]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/high-throughput-fifo.html + // [dead-letter queues:]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html + // [Exactly-once processing]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues-exactly-once-processing.html + // [FIFO]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html + // [Quotas related to messages]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/quotas-messages.html + // [Visibility Timeout]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html + // [Key Terms]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms + // [server-side-encryption]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html + // [FIFO queue logic]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues-understanding-logic.html + // [FIFO (first-in-first-out) queues]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html AttributeNames []types.QueueAttributeName noSmithyDocumentSerde @@ -220,6 +275,12 @@ func (c *Client) addOperationGetQueueAttributesMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpGetQueueAttributesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_GetQueueUrl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_GetQueueUrl.go index 5e89c73b062..44641059cc8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_GetQueueUrl.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_GetQueueUrl.go @@ -10,12 +10,15 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the URL of an existing Amazon SQS queue. To access a queue that belongs -// to another AWS account, use the QueueOwnerAWSAccountId parameter to specify the -// account ID of the queue's owner. The queue's owner must grant you permission to -// access the queue. For more information about shared queue access, see -// AddPermission or see Allow Developers to Write Messages to a Shared Queue (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-writing-an-sqs-policy.html#write-messages-to-shared-queue) -// in the Amazon SQS Developer Guide. +// Returns the URL of an existing Amazon SQS queue. 
+// +// To access a queue that belongs to another AWS account, use the +// QueueOwnerAWSAccountId parameter to specify the account ID of the queue's owner. +// The queue's owner must grant you permission to access the queue. For more +// information about shared queue access, see AddPermissionor see [Allow Developers to Write Messages to a Shared Queue] in the Amazon SQS Developer +// Guide. +// +// [Allow Developers to Write Messages to a Shared Queue]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-writing-an-sqs-policy.html#write-messages-to-shared-queue func (c *Client) GetQueueUrl(ctx context.Context, params *GetQueueUrlInput, optFns ...func(*Options)) (*GetQueueUrlOutput, error) { if params == nil { params = &GetQueueUrlInput{} @@ -34,8 +37,9 @@ func (c *Client) GetQueueUrl(ctx context.Context, params *GetQueueUrlInput, optF type GetQueueUrlInput struct { // The name of the queue whose URL must be fetched. Maximum 80 characters. Valid - // values: alphanumeric characters, hyphens ( - ), and underscores ( _ ). Queue - // URLs and names are case-sensitive. + // values: alphanumeric characters, hyphens ( - ), and underscores ( _ ). + // + // Queue URLs and names are case-sensitive. // // This member is required. QueueName *string @@ -46,8 +50,9 @@ type GetQueueUrlInput struct { noSmithyDocumentSerde } -// For more information, see Interpreting Responses (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-api-responses.html) -// in the Amazon SQS Developer Guide. +// For more information, see [Interpreting Responses] in the Amazon SQS Developer Guide. +// +// [Interpreting Responses]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-api-responses.html type GetQueueUrlOutput struct { // The URL of the queue. @@ -114,6 +119,12 @@ func (c *Client) addOperationGetQueueUrlMiddlewares(stack *middleware.Stack, opt if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpGetQueueUrlValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ListDeadLetterSourceQueues.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ListDeadLetterSourceQueues.go index c877ac29dc5..e6ad7457085 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ListDeadLetterSourceQueues.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ListDeadLetterSourceQueues.go @@ -11,16 +11,20 @@ import ( ) // Returns a list of your queues that have the RedrivePolicy queue attribute -// configured with a dead-letter queue. The ListDeadLetterSourceQueues methods -// supports pagination. Set parameter MaxResults in the request to specify the -// maximum number of results to be returned in the response. If you do not set -// MaxResults , the response includes a maximum of 1,000 results. If you set -// MaxResults and there are additional results to display, the response includes a -// value for NextToken . Use NextToken as a parameter in your next request to -// ListDeadLetterSourceQueues to receive the next page of results. 
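The MaxResults / NextToken pagination described above for ListDeadLetterSourceQueues is normally driven through the generated paginator rather than by handing NextToken around manually. A sketch, with a placeholder DLQ URL and an illustrative page size (Limit maps onto MaxResults per request):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sqs"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := sqs.NewFromConfig(cfg)

	dlqURL := "https://sqs.us-east-1.amazonaws.com/123456789012/example-dlq" // placeholder

	// The paginator handles NextToken bookkeeping across pages.
	p := sqs.NewListDeadLetterSourceQueuesPaginator(client,
		&sqs.ListDeadLetterSourceQueuesInput{QueueUrl: aws.String(dlqURL)},
		func(o *sqs.ListDeadLetterSourceQueuesPaginatorOptions) { o.Limit = 10 },
	)

	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			log.Fatal(err)
		}
		for _, sourceQueueURL := range page.QueueUrls {
			fmt.Println("source queue:", sourceQueueURL)
		}
	}
}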
For more -// information about using dead-letter queues, see Using Amazon SQS Dead-Letter -// Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) -// in the Amazon SQS Developer Guide. +// configured with a dead-letter queue. +// +// The ListDeadLetterSourceQueues methods supports pagination. Set parameter +// MaxResults in the request to specify the maximum number of results to be +// returned in the response. If you do not set MaxResults , the response includes a +// maximum of 1,000 results. If you set MaxResults and there are additional +// results to display, the response includes a value for NextToken . Use NextToken +// as a parameter in your next request to ListDeadLetterSourceQueues to receive +// the next page of results. +// +// For more information about using dead-letter queues, see [Using Amazon SQS Dead-Letter Queues] in the Amazon SQS +// Developer Guide. +// +// [Using Amazon SQS Dead-Letter Queues]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html func (c *Client) ListDeadLetterSourceQueues(ctx context.Context, params *ListDeadLetterSourceQueuesInput, optFns ...func(*Options)) (*ListDeadLetterSourceQueuesOutput, error) { if params == nil { params = &ListDeadLetterSourceQueuesInput{} @@ -38,7 +42,9 @@ func (c *Client) ListDeadLetterSourceQueues(ctx context.Context, params *ListDea type ListDeadLetterSourceQueuesInput struct { - // The URL of a dead-letter queue. Queue URLs and names are case-sensitive. + // The URL of a dead-letter queue. + // + // Queue URLs and names are case-sensitive. // // This member is required. QueueUrl *string @@ -128,6 +134,12 @@ func (c *Client) addOperationListDeadLetterSourceQueuesMiddlewares(stack *middle if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpListDeadLetterSourceQueuesValidationMiddleware(stack); err != nil { return err } @@ -152,14 +164,6 @@ func (c *Client) addOperationListDeadLetterSourceQueuesMiddlewares(stack *middle return nil } -// ListDeadLetterSourceQueuesAPIClient is a client that implements the -// ListDeadLetterSourceQueues operation. -type ListDeadLetterSourceQueuesAPIClient interface { - ListDeadLetterSourceQueues(context.Context, *ListDeadLetterSourceQueuesInput, ...func(*Options)) (*ListDeadLetterSourceQueuesOutput, error) -} - -var _ ListDeadLetterSourceQueuesAPIClient = (*Client)(nil) - // ListDeadLetterSourceQueuesPaginatorOptions is the paginator options for // ListDeadLetterSourceQueues type ListDeadLetterSourceQueuesPaginatorOptions struct { @@ -227,6 +231,9 @@ func (p *ListDeadLetterSourceQueuesPaginator) NextPage(ctx context.Context, optF } params.MaxResults = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListDeadLetterSourceQueues(ctx, ¶ms, optFns...) if err != nil { return nil, err @@ -246,6 +253,14 @@ func (p *ListDeadLetterSourceQueuesPaginator) NextPage(ctx context.Context, optF return result, nil } +// ListDeadLetterSourceQueuesAPIClient is a client that implements the +// ListDeadLetterSourceQueues operation. 
+type ListDeadLetterSourceQueuesAPIClient interface { + ListDeadLetterSourceQueues(context.Context, *ListDeadLetterSourceQueuesInput, ...func(*Options)) (*ListDeadLetterSourceQueuesOutput, error) +} + +var _ ListDeadLetterSourceQueuesAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListDeadLetterSourceQueues(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ListMessageMoveTasks.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ListMessageMoveTasks.go index 6187d4070a3..c315f91004b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ListMessageMoveTasks.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ListMessageMoveTasks.go @@ -13,14 +13,16 @@ import ( // Gets the most recent message movement tasks (up to 10) under a specific source // queue. -// - This action is currently limited to supporting message redrive from -// dead-letter queues (DLQs) (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) -// only. In this context, the source queue is the dead-letter queue (DLQ), while -// the destination queue can be the original source queue (from which the messages -// were driven to the dead-letter-queue), or a custom destination queue. -// - Currently, only standard queues are supported. +// +// - This action is currently limited to supporting message redrive from [dead-letter queues (DLQs)]only. +// In this context, the source queue is the dead-letter queue (DLQ), while the +// destination queue can be the original source queue (from which the messages were +// driven to the dead-letter-queue), or a custom destination queue. +// // - Only one active message movement task is supported per queue at any given // time. +// +// [dead-letter queues (DLQs)]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html func (c *Client) ListMessageMoveTasks(ctx context.Context, params *ListMessageMoveTasksInput, optFns ...func(*Options)) (*ListMessageMoveTasksOutput, error) { if params == nil { params = &ListMessageMoveTasksInput{} @@ -116,6 +118,12 @@ func (c *Client) addOperationListMessageMoveTasksMiddlewares(stack *middleware.S if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpListMessageMoveTasksValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ListQueueTags.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ListQueueTags.go index 226fb297dd4..a84da75947b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ListQueueTags.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ListQueueTags.go @@ -11,11 +11,13 @@ import ( ) // List all cost allocation tags added to the specified Amazon SQS queue. For an -// overview, see Tagging Your Amazon SQS Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-tags.html) -// in the Amazon SQS Developer Guide. Cross-account permissions don't apply to this -// action. 
For more information, see Grant cross-account permissions to a role and -// a username (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) +// overview, see [Tagging Your Amazon SQS Queues]in the Amazon SQS Developer Guide. +// +// Cross-account permissions don't apply to this action. For more information, see [Grant cross-account permissions to a role and a username] // in the Amazon SQS Developer Guide. +// +// [Tagging Your Amazon SQS Queues]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-tags.html +// [Grant cross-account permissions to a role and a username]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name func (c *Client) ListQueueTags(ctx context.Context, params *ListQueueTagsInput, optFns ...func(*Options)) (*ListQueueTagsOutput, error) { if params == nil { params = &ListQueueTagsInput{} @@ -107,6 +109,12 @@ func (c *Client) addOperationListQueueTagsMiddlewares(stack *middleware.Stack, o if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpListQueueTagsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ListQueues.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ListQueues.go index 5f580999a2e..5ebf5648f50 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ListQueues.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ListQueues.go @@ -13,15 +13,19 @@ import ( // Returns a list of your queues in the current region. The response includes a // maximum of 1,000 results. If you specify a value for the optional // QueueNamePrefix parameter, only queues with a name that begins with the -// specified value are returned. The listQueues methods supports pagination. Set -// parameter MaxResults in the request to specify the maximum number of results to -// be returned in the response. If you do not set MaxResults , the response -// includes a maximum of 1,000 results. If you set MaxResults and there are -// additional results to display, the response includes a value for NextToken . Use -// NextToken as a parameter in your next request to listQueues to receive the next -// page of results. Cross-account permissions don't apply to this action. For more -// information, see Grant cross-account permissions to a role and a username (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) +// specified value are returned. +// +// The listQueues methods supports pagination. Set parameter MaxResults in the +// request to specify the maximum number of results to be returned in the response. +// If you do not set MaxResults , the response includes a maximum of 1,000 results. +// If you set MaxResults and there are additional results to display, the response +// includes a value for NextToken . Use NextToken as a parameter in your next +// request to listQueues to receive the next page of results. +// +// Cross-account permissions don't apply to this action. 
For more information, see [Grant cross-account permissions to a role and a username] // in the Amazon SQS Developer Guide. +// +// [Grant cross-account permissions to a role and a username]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name func (c *Client) ListQueues(ctx context.Context, params *ListQueuesInput, optFns ...func(*Options)) (*ListQueuesOutput, error) { if params == nil { params = &ListQueuesInput{} @@ -47,8 +51,9 @@ type ListQueuesInput struct { NextToken *string // A string to use for filtering the list results. Only those queues whose name - // begins with the specified string are returned. Queue URLs and names are - // case-sensitive. + // begins with the specified string are returned. + // + // Queue URLs and names are case-sensitive. QueueNamePrefix *string noSmithyDocumentSerde @@ -127,6 +132,12 @@ func (c *Client) addOperationListQueuesMiddlewares(stack *middleware.Stack, opti if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListQueues(options.Region), middleware.Before); err != nil { return err } @@ -148,13 +159,6 @@ func (c *Client) addOperationListQueuesMiddlewares(stack *middleware.Stack, opti return nil } -// ListQueuesAPIClient is a client that implements the ListQueues operation. -type ListQueuesAPIClient interface { - ListQueues(context.Context, *ListQueuesInput, ...func(*Options)) (*ListQueuesOutput, error) -} - -var _ ListQueuesAPIClient = (*Client)(nil) - // ListQueuesPaginatorOptions is the paginator options for ListQueues type ListQueuesPaginatorOptions struct { // Maximum number of results to include in the response. Value range is 1 to 1000. @@ -219,6 +223,9 @@ func (p *ListQueuesPaginator) NextPage(ctx context.Context, optFns ...func(*Opti } params.MaxResults = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListQueues(ctx, ¶ms, optFns...) if err != nil { return nil, err @@ -238,6 +245,13 @@ func (p *ListQueuesPaginator) NextPage(ctx context.Context, optFns ...func(*Opti return result, nil } +// ListQueuesAPIClient is a client that implements the ListQueues operation. +type ListQueuesAPIClient interface { + ListQueues(context.Context, *ListQueuesInput, ...func(*Options)) (*ListQueuesOutput, error) +} + +var _ ListQueuesAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListQueues(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_PurgeQueue.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_PurgeQueue.go index aafbf945604..4781c017a07 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_PurgeQueue.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_PurgeQueue.go @@ -11,12 +11,19 @@ import ( ) // Deletes available messages in a queue (including in-flight messages) specified -// by the QueueURL parameter. When you use the PurgeQueue action, you can't -// retrieve any messages deleted from a queue. The message deletion process takes -// up to 60 seconds. We recommend waiting for 60 seconds regardless of your queue's -// size. 
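// A minimal sketch of driving the ListQueuesPaginator shown above instead of handling
// NextToken by hand. It assumes a configured *sqs.Client plus imports of context, fmt,
// github.com/aws/aws-sdk-go-v2/aws and github.com/aws/aws-sdk-go-v2/service/sqs; the
// "prod-" prefix is only a placeholder.
func listQueuesByPrefix(ctx context.Context, client *sqs.Client) error {
	p := sqs.NewListQueuesPaginator(client, &sqs.ListQueuesInput{
		QueueNamePrefix: aws.String("prod-"), // prefix match; queue names are case-sensitive
	})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx) // each page holds up to MaxResults queue URLs
		if err != nil {
			return err
		}
		for _, url := range page.QueueUrls {
			fmt.Println(url)
		}
	}
	return nil
}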
Messages sent to the queue before you call PurgeQueue might be received -// but are deleted within the next minute. Messages sent to the queue after you -// call PurgeQueue might be deleted while the queue is being purged. +// by the QueueURL parameter. +// +// When you use the PurgeQueue action, you can't retrieve any messages deleted +// from a queue. +// +// The message deletion process takes up to 60 seconds. We recommend waiting for +// 60 seconds regardless of your queue's size. +// +// Messages sent to the queue before you call PurgeQueue might be received but are +// deleted within the next minute. +// +// Messages sent to the queue after you call PurgeQueue might be deleted while the +// queue is being purged. func (c *Client) PurgeQueue(ctx context.Context, params *PurgeQueueInput, optFns ...func(*Options)) (*PurgeQueueOutput, error) { if params == nil { params = &PurgeQueueInput{} @@ -34,8 +41,9 @@ func (c *Client) PurgeQueue(ctx context.Context, params *PurgeQueueInput, optFns type PurgeQueueInput struct { - // The URL of the queue from which the PurgeQueue action deletes messages. Queue - // URLs and names are case-sensitive. + // The URL of the queue from which the PurgeQueue action deletes messages. + // + // Queue URLs and names are case-sensitive. // // This member is required. QueueUrl *string @@ -105,6 +113,12 @@ func (c *Client) addOperationPurgeQueueMiddlewares(stack *middleware.Stack, opti if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpPurgeQueueValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ReceiveMessage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ReceiveMessage.go index 6c27d6a7c18..3245a405a1a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ReceiveMessage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ReceiveMessage.go @@ -12,37 +12,53 @@ import ( ) // Retrieves one or more messages (up to 10), from the specified queue. Using the -// WaitTimeSeconds parameter enables long-poll support. For more information, see -// Amazon SQS Long Polling (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-long-polling.html) -// in the Amazon SQS Developer Guide. Short poll is the default behavior where a -// weighted random set of machines is sampled on a ReceiveMessage call. Thus, only -// the messages on the sampled machines are returned. If the number of messages in -// the queue is small (fewer than 1,000), you most likely get fewer messages than -// you requested per ReceiveMessage call. If the number of messages in the queue -// is extremely small, you might not receive any messages in a particular -// ReceiveMessage response. If this happens, repeat the request. For each message -// returned, the response includes the following: +// WaitTimeSeconds parameter enables long-poll support. For more information, see [Amazon SQS Long Polling] +// in the Amazon SQS Developer Guide. +// +// Short poll is the default behavior where a weighted random set of machines is +// sampled on a ReceiveMessage call. Thus, only the messages on the sampled +// machines are returned. If the number of messages in the queue is small (fewer +// than 1,000), you most likely get fewer messages than you requested per +// ReceiveMessage call. 
If the number of messages in the queue is extremely small, +// you might not receive any messages in a particular ReceiveMessage response. If +// this happens, repeat the request. +// +// For each message returned, the response includes the following: +// // - The message body. -// - An MD5 digest of the message body. For information about MD5, see RFC1321 (https://www.ietf.org/rfc/rfc1321.txt) -// . +// +// - An MD5 digest of the message body. For information about MD5, see [RFC1321]. +// // - The MessageId you received when you sent the message to the queue. +// // - The receipt handle. +// // - The message attributes. +// // - An MD5 digest of the message attributes. // // The receipt handle is the identifier you must provide when deleting the -// message. For more information, see Queue and Message Identifiers (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-message-identifiers.html) -// in the Amazon SQS Developer Guide. You can provide the VisibilityTimeout -// parameter in your request. The parameter is applied to the messages that Amazon -// SQS returns in the response. If you don't include the parameter, the overall -// visibility timeout for the queue is used for the returned messages. For more -// information, see Visibility Timeout (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) -// in the Amazon SQS Developer Guide. A message that isn't deleted or a message -// whose visibility isn't extended before the visibility timeout expires counts as -// a failed receive. Depending on the configuration of the queue, the message might -// be sent to the dead-letter queue. In the future, new attributes might be added. -// If you write code that calls this action, we recommend that you structure your -// code so that it can handle new attributes gracefully. +// message. For more information, see [Queue and Message Identifiers]in the Amazon SQS Developer Guide. +// +// You can provide the VisibilityTimeout parameter in your request. The parameter +// is applied to the messages that Amazon SQS returns in the response. If you don't +// include the parameter, the overall visibility timeout for the queue is used for +// the returned messages. For more information, see [Visibility Timeout]in the Amazon SQS Developer +// Guide. +// +// A message that isn't deleted or a message whose visibility isn't extended +// before the visibility timeout expires counts as a failed receive. Depending on +// the configuration of the queue, the message might be sent to the dead-letter +// queue. +// +// In the future, new attributes might be added. If you write code that calls this +// action, we recommend that you structure your code so that it can handle new +// attributes gracefully. 
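// A minimal sketch of the receive/delete flow described above: long-poll with
// WaitTimeSeconds, then delete each message by its receipt handle (not its MessageId).
// It assumes a configured *sqs.Client plus imports of context, github.com/aws/aws-sdk-go-v2/aws
// and github.com/aws/aws-sdk-go-v2/service/sqs; queueURL is a placeholder.
func receiveAndDelete(ctx context.Context, client *sqs.Client, queueURL string) error {
	out, err := client.ReceiveMessage(ctx, &sqs.ReceiveMessageInput{
		QueueUrl:            aws.String(queueURL),
		MaxNumberOfMessages: 10, // SQS returns at most 10 messages per call
		WaitTimeSeconds:     20, // enables long polling
	})
	if err != nil {
		return err
	}
	for _, msg := range out.Messages {
		// process aws.ToString(msg.Body) here, then delete using the receipt handle
		if _, err := client.DeleteMessage(ctx, &sqs.DeleteMessageInput{
			QueueUrl:      aws.String(queueURL),
			ReceiptHandle: msg.ReceiptHandle,
		}); err != nil {
			return err
		}
	}
	return nil
}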
+// +// [Queue and Message Identifiers]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-message-identifiers.html +// [Visibility Timeout]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html +// [Amazon SQS Long Polling]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-long-polling.html +// [RFC1321]: https://www.ietf.org/rfc/rfc1321.txt func (c *Client) ReceiveMessage(ctx context.Context, params *ReceiveMessageInput, optFns ...func(*Options)) (*ReceiveMessageOutput, error) { if params == nil { params = &ReceiveMessageInput{} @@ -60,38 +76,57 @@ func (c *Client) ReceiveMessage(ctx context.Context, params *ReceiveMessageInput type ReceiveMessageInput struct { - // The URL of the Amazon SQS queue from which messages are received. Queue URLs - // and names are case-sensitive. + // The URL of the Amazon SQS queue from which messages are received. + // + // Queue URLs and names are case-sensitive. // // This member is required. QueueUrl *string + // This parameter has been deprecated but will be supported for backward + // compatibility. To provide attribute names, you are encouraged to use + // MessageSystemAttributeNames . + // // A list of attributes that need to be returned along with each message. These // attributes include: + // // - All – Returns all values. + // // - ApproximateFirstReceiveTimestamp – Returns the time the message was first - // received from the queue ( epoch time (http://en.wikipedia.org/wiki/Unix_time) - // in milliseconds). + // received from the queue ([epoch time] in milliseconds). + // // - ApproximateReceiveCount – Returns the number of times a message has been // received across all queues but not deleted. + // // - AWSTraceHeader – Returns the X-Ray trace header string. + // // - SenderId + // // - For a user, returns the user ID, for example ABCDEFGHI1JKLMNOPQ23R . + // // - For an IAM role, returns the IAM role ID, for example // ABCDE1F2GH3I4JK5LMNOP:i-a123b456 . - // - SentTimestamp – Returns the time the message was sent to the queue ( epoch - // time (http://en.wikipedia.org/wiki/Unix_time) in milliseconds). + // + // - SentTimestamp – Returns the time the message was sent to the queue ([epoch time] in + // milliseconds). + // // - SqsManagedSseEnabled – Enables server-side queue encryption using SQS owned // encryption keys. Only one server-side encryption option is supported per queue - // (for example, SSE-KMS (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-configure-sse-existing-queue.html) - // or SSE-SQS (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-configure-sqs-sse-queue.html) - // ). + // (for example, [SSE-KMS]or [SSE-SQS]). + // // - MessageDeduplicationId – Returns the value provided by the producer that - // calls the SendMessage action. - // - MessageGroupId – Returns the value provided by the producer that calls the - // SendMessage action. Messages with the same MessageGroupId are returned in - // sequence. + // calls the SendMessageaction. + // + // - MessageGroupId – Returns the value provided by the producer that calls the SendMessage + // action. Messages with the same MessageGroupId are returned in sequence. + // // - SequenceNumber – Returns the value provided by Amazon SQS. 
+ // + // [SSE-KMS]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-configure-sse-existing-queue.html + // [epoch time]: http://en.wikipedia.org/wiki/Unix_time + // [SSE-SQS]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-configure-sqs-sse-queue.html + // + // Deprecated: AttributeNames has been replaced by MessageSystemAttributeNames AttributeNames []types.QueueAttributeName // The maximum number of messages to return. Amazon SQS never returns more @@ -100,61 +135,119 @@ type ReceiveMessageInput struct { MaxNumberOfMessages int32 // The name of the message attribute, where N is the index. + // // - The name can contain alphanumeric characters and the underscore ( _ ), // hyphen ( - ), and period ( . ). + // // - The name is case-sensitive and must be unique among all attribute names for // the message. + // // - The name must not start with AWS-reserved prefixes such as AWS. or Amazon. // (or any casing variants). + // // - The name must not start or end with a period ( . ), and it should not have // periods in succession ( .. ). + // // - The name can be up to 256 characters long. + // // When using ReceiveMessage , you can send a list of attribute names to receive, // or you can return all of the attributes by specifying All or .* in your // request. You can also use all message attributes starting with a prefix, for // example bar.* . MessageAttributeNames []string - // This parameter applies only to FIFO (first-in-first-out) queues. The token used - // for deduplication of ReceiveMessage calls. If a networking issue occurs after a - // ReceiveMessage action, and instead of a response you receive a generic error, it - // is possible to retry the same action with an identical ReceiveRequestAttemptId - // to retrieve the same set of messages, even if their visibility timeout has not - // yet expired. + // A list of attributes that need to be returned along with each message. These + // attributes include: + // + // - All – Returns all values. + // + // - ApproximateFirstReceiveTimestamp – Returns the time the message was first + // received from the queue ([epoch time] in milliseconds). + // + // - ApproximateReceiveCount – Returns the number of times a message has been + // received across all queues but not deleted. + // + // - AWSTraceHeader – Returns the X-Ray trace header string. + // + // - SenderId + // + // - For a user, returns the user ID, for example ABCDEFGHI1JKLMNOPQ23R . + // + // - For an IAM role, returns the IAM role ID, for example + // ABCDE1F2GH3I4JK5LMNOP:i-a123b456 . + // + // - SentTimestamp – Returns the time the message was sent to the queue ([epoch time] in + // milliseconds). + // + // - SqsManagedSseEnabled – Enables server-side queue encryption using SQS owned + // encryption keys. Only one server-side encryption option is supported per queue + // (for example, [SSE-KMS]or [SSE-SQS]). + // + // - MessageDeduplicationId – Returns the value provided by the producer that + // calls the SendMessageaction. + // + // - MessageGroupId – Returns the value provided by the producer that calls the SendMessage + // action. Messages with the same MessageGroupId are returned in sequence. + // + // - SequenceNumber – Returns the value provided by Amazon SQS. 
+ // + // [SSE-KMS]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-configure-sse-existing-queue.html + // [epoch time]: http://en.wikipedia.org/wiki/Unix_time + // [SSE-SQS]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-configure-sqs-sse-queue.html + MessageSystemAttributeNames []types.MessageSystemAttributeName + + // This parameter applies only to FIFO (first-in-first-out) queues. + // + // The token used for deduplication of ReceiveMessage calls. If a networking issue + // occurs after a ReceiveMessage action, and instead of a response you receive a + // generic error, it is possible to retry the same action with an identical + // ReceiveRequestAttemptId to retrieve the same set of messages, even if their + // visibility timeout has not yet expired. + // // - You can use ReceiveRequestAttemptId only for 5 minutes after a // ReceiveMessage action. + // // - When you set FifoQueue , a caller of the ReceiveMessage action can provide a // ReceiveRequestAttemptId explicitly. - // - If a caller of the ReceiveMessage action doesn't provide a - // ReceiveRequestAttemptId , Amazon SQS generates a ReceiveRequestAttemptId . + // // - It is possible to retry the ReceiveMessage action with the same // ReceiveRequestAttemptId if none of the messages have been modified (deleted or // had their visibility changes). + // // - During a visibility timeout, subsequent calls with the same // ReceiveRequestAttemptId return the same messages and receipt handles. If a // retry occurs within the deduplication interval, it resets the visibility - // timeout. For more information, see Visibility Timeout (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) - // in the Amazon SQS Developer Guide. If a caller of the ReceiveMessage action - // still processes messages when the visibility timeout expires and messages become - // visible, another worker consuming from the same queue can receive the same - // messages and therefore process duplicates. Also, if a consumer whose message - // processing time is longer than the visibility timeout tries to delete the - // processed messages, the action fails with an error. To mitigate this effect, - // ensure that your application observes a safe threshold before the visibility - // timeout expires and extend the visibility timeout as necessary. + // timeout. For more information, see [Visibility Timeout]in the Amazon SQS Developer Guide. + // + // If a caller of the ReceiveMessage action still processes messages when the + // visibility timeout expires and messages become visible, another worker consuming + // from the same queue can receive the same messages and therefore process + // duplicates. Also, if a consumer whose message processing time is longer than the + // visibility timeout tries to delete the processed messages, the action fails with + // an error. + // + // To mitigate this effect, ensure that your application observes a safe threshold + // before the visibility timeout expires and extend the visibility timeout as + // necessary. + // // - While messages with a particular MessageGroupId are invisible, no more // messages belonging to the same MessageGroupId are returned until the // visibility timeout expires. You can still receive messages with another // MessageGroupId as long as it is also visible. 
+ // // - If a caller of ReceiveMessage can't track the ReceiveRequestAttemptId , no // retries work until the original visibility timeout expires. As a result, delays // might occur but the messages in the queue remain in a strict order. + // // The maximum length of ReceiveRequestAttemptId is 128 characters. // ReceiveRequestAttemptId can contain alphanumeric characters ( a-z , A-Z , 0-9 ) - // and punctuation ( !"#$%&'()*+,-./:;<=>?@[\]^_`{|}~ ). For best practices of - // using ReceiveRequestAttemptId , see Using the ReceiveRequestAttemptId Request - // Parameter (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-receiverequestattemptid-request-parameter.html) - // in the Amazon SQS Developer Guide. + // and punctuation ( !"#$%&'()*+,-./:;<=>?@[\]^_`{|}~ ). + // + // For best practices of using ReceiveRequestAttemptId , see [Using the ReceiveRequestAttemptId Request Parameter] in the Amazon SQS + // Developer Guide. + // + // [Using the ReceiveRequestAttemptId Request Parameter]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-receiverequestattemptid-request-parameter.html + // [Visibility Timeout]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html ReceiveRequestAttemptId *string // The duration (in seconds) that the received messages are hidden from subsequent @@ -164,12 +257,15 @@ type ReceiveMessageInput struct { // The duration (in seconds) for which the call waits for a message to arrive in // the queue before returning. If a message is available, the call returns sooner // than WaitTimeSeconds . If no messages are available and the wait time expires, - // the call returns successfully with an empty list of messages. To avoid HTTP - // errors, ensure that the HTTP response timeout for ReceiveMessage requests is - // longer than the WaitTimeSeconds parameter. For example, with the Java SDK, you - // can set HTTP transport settings using the NettyNioAsyncHttpClient (https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClient.html) - // for asynchronous clients, or the ApacheHttpClient (https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/http/apache/ApacheHttpClient.html) - // for synchronous clients. + // the call does not return a message list. + // + // To avoid HTTP errors, ensure that the HTTP response timeout for ReceiveMessage + // requests is longer than the WaitTimeSeconds parameter. For example, with the + // Java SDK, you can set HTTP transport settings using the [NettyNioAsyncHttpClient]for asynchronous + // clients, or the [ApacheHttpClient]for synchronous clients. 
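// A sketch of the two points just documented: request system attributes through the new
// MessageSystemAttributeNames field (AttributeNames is deprecated), and keep the HTTP
// client timeout longer than WaitTimeSeconds. It assumes imports of context, net/http, time,
// github.com/aws/aws-sdk-go-v2/aws, github.com/aws/aws-sdk-go-v2/config,
// github.com/aws/aws-sdk-go-v2/service/sqs and .../service/sqs/types; values are placeholders.
func newReceiver(ctx context.Context) (*sqs.Client, error) {
	cfg, err := config.LoadDefaultConfig(ctx,
		// WaitTimeSeconds below is 20s, so give the HTTP client more headroom than that.
		config.WithHTTPClient(&http.Client{Timeout: 50 * time.Second}),
	)
	if err != nil {
		return nil, err
	}
	return sqs.NewFromConfig(cfg), nil
}

func receiveWithSystemAttributes(ctx context.Context, client *sqs.Client, queueURL string) (*sqs.ReceiveMessageOutput, error) {
	return client.ReceiveMessage(ctx, &sqs.ReceiveMessageInput{
		QueueUrl:        aws.String(queueURL),
		WaitTimeSeconds: 20,
		MessageSystemAttributeNames: []types.MessageSystemAttributeName{
			types.MessageSystemAttributeName("All"), // or individual names such as "SentTimestamp"
		},
	})
}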
+ // + // [NettyNioAsyncHttpClient]: https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClient.html + // [ApacheHttpClient]: https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/http/apache/ApacheHttpClient.html WaitTimeSeconds int32 noSmithyDocumentSerde @@ -245,6 +341,12 @@ func (c *Client) addOperationReceiveMessageMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpReceiveMessageValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_RemovePermission.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_RemovePermission.go index 8bf0fe6a7ef..a83f4d95b4d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_RemovePermission.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_RemovePermission.go @@ -12,13 +12,17 @@ import ( // Revokes any permissions in the queue policy that matches the specified Label // parameter. +// // - Only the owner of a queue can remove permissions from it. +// // - Cross-account permissions don't apply to this action. For more information, -// see Grant cross-account permissions to a role and a username (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) -// in the Amazon SQS Developer Guide. +// see [Grant cross-account permissions to a role and a username]in the Amazon SQS Developer Guide. +// // - To remove the ability to change queue permissions, you must deny permission // to the AddPermission , RemovePermission , and SetQueueAttributes actions in // your IAM policy. +// +// [Grant cross-account permissions to a role and a username]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name func (c *Client) RemovePermission(ctx context.Context, params *RemovePermissionInput, optFns ...func(*Options)) (*RemovePermissionOutput, error) { if params == nil { params = &RemovePermissionInput{} @@ -37,13 +41,14 @@ func (c *Client) RemovePermission(ctx context.Context, params *RemovePermissionI type RemovePermissionInput struct { // The identification of the permission to remove. This is the label added using - // the AddPermission action. + // the AddPermissionaction. // // This member is required. Label *string - // The URL of the Amazon SQS queue from which permissions are removed. Queue URLs - // and names are case-sensitive. + // The URL of the Amazon SQS queue from which permissions are removed. + // + // Queue URLs and names are case-sensitive. // // This member is required. 
QueueUrl *string @@ -113,6 +118,12 @@ func (c *Client) addOperationRemovePermissionMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpRemovePermissionValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_SendMessage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_SendMessage.go index 4dffc5017d4..739dcdbc3db 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_SendMessage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_SendMessage.go @@ -11,11 +11,19 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Delivers a message to the specified queue. A message can include only XML, -// JSON, and unformatted text. The following Unicode characters are allowed: #x9 | -// #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF Any -// characters not included in this list will be rejected. For more information, see -// the W3C specification for characters (http://www.w3.org/TR/REC-xml/#charsets) . +// Delivers a message to the specified queue. +// +// A message can include only XML, JSON, and unformatted text. The following +// Unicode characters are allowed. For more information, see the [W3C specification for characters]. +// +// #x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF +// +// Amazon SQS does not throw an exception or completely reject the message if it +// contains invalid characters. Instead, it replaces those invalid characters with +// U+FFFD before storing the message in the queue, as long as the message body +// contains at least one valid character. +// +// [W3C specification for characters]: http://www.w3.org/TR/REC-xml/#charsets func (c *Client) SendMessage(ctx context.Context, params *SendMessageInput, optFns ...func(*Options)) (*SendMessageOutput, error) { if params == nil { params = &SendMessageInput{} @@ -34,96 +42,134 @@ func (c *Client) SendMessage(ctx context.Context, params *SendMessageInput, optF type SendMessageInput struct { // The message to send. The minimum size is one character. The maximum size is 256 - // KiB. A message can include only XML, JSON, and unformatted text. The following - // Unicode characters are allowed: #x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to - // #xFFFD | #x10000 to #x10FFFF Any characters not included in this list will be - // rejected. For more information, see the W3C specification for characters (http://www.w3.org/TR/REC-xml/#charsets) - // . + // KiB. + // + // A message can include only XML, JSON, and unformatted text. The following + // Unicode characters are allowed. For more information, see the [W3C specification for characters]. + // + // #x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF + // + // Amazon SQS does not throw an exception or completely reject the message if it + // contains invalid characters. Instead, it replaces those invalid characters with + // U+FFFD before storing the message in the queue, as long as the message body + // contains at least one valid character. + // + // [W3C specification for characters]: http://www.w3.org/TR/REC-xml/#charsets // // This member is required. MessageBody *string - // The URL of the Amazon SQS queue to which a message is sent. 
Queue URLs and - // names are case-sensitive. + // The URL of the Amazon SQS queue to which a message is sent. + // + // Queue URLs and names are case-sensitive. // // This member is required. QueueUrl *string - // The length of time, in seconds, for which to delay a specific message. Valid + // The length of time, in seconds, for which to delay a specific message. Valid // values: 0 to 900. Maximum: 15 minutes. Messages with a positive DelaySeconds // value become available for processing after the delay period is finished. If you - // don't specify a value, the default value for the queue applies. When you set - // FifoQueue , you can't set DelaySeconds per message. You can set this parameter - // only on a queue level. + // don't specify a value, the default value for the queue applies. + // + // When you set FifoQueue , you can't set DelaySeconds per message. You can set + // this parameter only on a queue level. DelaySeconds int32 // Each message attribute consists of a Name , Type , and Value . For more - // information, see Amazon SQS message attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html#sqs-message-attributes) - // in the Amazon SQS Developer Guide. + // information, see [Amazon SQS message attributes]in the Amazon SQS Developer Guide. + // + // [Amazon SQS message attributes]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html#sqs-message-attributes MessageAttributes map[string]types.MessageAttributeValue - // This parameter applies only to FIFO (first-in-first-out) queues. The token used - // for deduplication of sent messages. If a message with a particular - // MessageDeduplicationId is sent successfully, any messages sent with the same - // MessageDeduplicationId are accepted successfully but aren't delivered during the - // 5-minute deduplication interval. For more information, see Exactly-once - // processing (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues-exactly-once-processing.html) - // in the Amazon SQS Developer Guide. + // This parameter applies only to FIFO (first-in-first-out) queues. + // + // The token used for deduplication of sent messages. If a message with a + // particular MessageDeduplicationId is sent successfully, any messages sent with + // the same MessageDeduplicationId are accepted successfully but aren't delivered + // during the 5-minute deduplication interval. For more information, see [Exactly-once processing]in the + // Amazon SQS Developer Guide. + // // - Every message must have a unique MessageDeduplicationId , + // // - You may provide a MessageDeduplicationId explicitly. + // // - If you aren't able to provide a MessageDeduplicationId and you enable // ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to // generate the MessageDeduplicationId using the body of the message (but not the // attributes of the message). + // // - If you don't provide a MessageDeduplicationId and the queue doesn't have // ContentBasedDeduplication set, the action fails with an error. + // // - If the queue has ContentBasedDeduplication set, your MessageDeduplicationId // overrides the generated one. + // // - When ContentBasedDeduplication is in effect, messages with identical content // sent within the deduplication interval are treated as duplicates and only one // copy of the message is delivered. 
+ // // - If you send one message with ContentBasedDeduplication enabled and then // another message with a MessageDeduplicationId that is the same as the one // generated for the first MessageDeduplicationId , the two messages are treated // as duplicates and only one copy of the message is delivered. + // // The MessageDeduplicationId is available to the consumer of the message (this - // can be useful for troubleshooting delivery issues). If a message is sent - // successfully but the acknowledgement is lost and the message is resent with the - // same MessageDeduplicationId after the deduplication interval, Amazon SQS can't - // detect duplicate messages. Amazon SQS continues to keep track of the message - // deduplication ID even after the message is received and deleted. The maximum - // length of MessageDeduplicationId is 128 characters. MessageDeduplicationId can - // contain alphanumeric characters ( a-z , A-Z , 0-9 ) and punctuation ( - // !"#$%&'()*+,-./:;<=>?@[\]^_`{|}~ ). For best practices of using - // MessageDeduplicationId , see Using the MessageDeduplicationId Property (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagededuplicationid-property.html) - // in the Amazon SQS Developer Guide. + // can be useful for troubleshooting delivery issues). + // + // If a message is sent successfully but the acknowledgement is lost and the + // message is resent with the same MessageDeduplicationId after the deduplication + // interval, Amazon SQS can't detect duplicate messages. + // + // Amazon SQS continues to keep track of the message deduplication ID even after + // the message is received and deleted. + // + // The maximum length of MessageDeduplicationId is 128 characters. + // MessageDeduplicationId can contain alphanumeric characters ( a-z , A-Z , 0-9 ) + // and punctuation ( !"#$%&'()*+,-./:;<=>?@[\]^_`{|}~ ). + // + // For best practices of using MessageDeduplicationId , see [Using the MessageDeduplicationId Property] in the Amazon SQS + // Developer Guide. + // + // [Using the MessageDeduplicationId Property]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagededuplicationid-property.html + // [Exactly-once processing]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues-exactly-once-processing.html MessageDeduplicationId *string - // This parameter applies only to FIFO (first-in-first-out) queues. The tag that - // specifies that a message belongs to a specific message group. Messages that - // belong to the same message group are processed in a FIFO manner (however, - // messages in different message groups might be processed out of order). To - // interleave multiple ordered streams within a single queue, use MessageGroupId + // This parameter applies only to FIFO (first-in-first-out) queues. + // + // The tag that specifies that a message belongs to a specific message group. + // Messages that belong to the same message group are processed in a FIFO manner + // (however, messages in different message groups might be processed out of order). + // To interleave multiple ordered streams within a single queue, use MessageGroupId // values (for example, session data for multiple users). In this scenario, // multiple consumers can process the queue, but the session data of each user is // processed in a FIFO fashion. + // // - You must associate a non-empty MessageGroupId with a message. If you don't // provide a MessageGroupId , the action fails. 
+ // // - ReceiveMessage might return messages with multiple MessageGroupId values. // For each MessageGroupId , the messages are sorted by time sent. The caller // can't specify a MessageGroupId . - // The length of MessageGroupId is 128 characters. Valid values: alphanumeric - // characters and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~) . For best - // practices of using MessageGroupId , see Using the MessageGroupId Property (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagegroupid-property.html) - // in the Amazon SQS Developer Guide. MessageGroupId is required for FIFO queues. - // You can't use it for Standard queues. + // + // The maximum length of MessageGroupId is 128 characters. Valid values: + // alphanumeric characters and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~) . + // + // For best practices of using MessageGroupId , see [Using the MessageGroupId Property] in the Amazon SQS Developer + // Guide. + // + // MessageGroupId is required for FIFO queues. You can't use it for Standard + // queues. + // + // [Using the MessageGroupId Property]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagegroupid-property.html MessageGroupId *string // The message system attribute to send. Each message system attribute consists of // a Name , Type , and Value . + // // - Currently, the only supported message system attribute is AWSTraceHeader . // Its type must be String and its value must be a correctly formatted X-Ray // trace header string. + // // - The size of a message system attribute doesn't count towards the total size // of a message. MessageSystemAttributes map[string]types.MessageSystemAttributeValue @@ -137,13 +183,17 @@ type SendMessageOutput struct { // An MD5 digest of the non-URL-encoded message attribute string. You can use this // attribute to verify that Amazon SQS received the message correctly. Amazon SQS // URL-decodes the message before creating the MD5 digest. For information about - // MD5, see RFC1321 (https://www.ietf.org/rfc/rfc1321.txt) . + // MD5, see [RFC1321]. + // + // [RFC1321]: https://www.ietf.org/rfc/rfc1321.txt MD5OfMessageAttributes *string // An MD5 digest of the non-URL-encoded message body string. You can use this // attribute to verify that Amazon SQS received the message correctly. Amazon SQS // URL-decodes the message before creating the MD5 digest. For information about - // MD5, see RFC1321 (https://www.ietf.org/rfc/rfc1321.txt) . + // MD5, see [RFC1321]. + // + // [RFC1321]: https://www.ietf.org/rfc/rfc1321.txt MD5OfMessageBody *string // An MD5 digest of the non-URL-encoded message system attribute string. You can @@ -152,14 +202,17 @@ type SendMessageOutput struct { MD5OfMessageSystemAttributes *string // An attribute containing the MessageId of the message sent to the queue. For - // more information, see Queue and Message Identifiers (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-message-identifiers.html) - // in the Amazon SQS Developer Guide. + // more information, see [Queue and Message Identifiers]in the Amazon SQS Developer Guide. + // + // [Queue and Message Identifiers]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-message-identifiers.html MessageId *string - // This parameter applies only to FIFO (first-in-first-out) queues. The large, - // non-consecutive number that Amazon SQS assigns to each message. The length of - // SequenceNumber is 128 bits. 
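// A minimal sketch of sending to a FIFO queue with the deduplication and grouping
// parameters described above; the returned SequenceNumber increases per MessageGroupId.
// It assumes a configured *sqs.Client plus imports of context, fmt,
// github.com/aws/aws-sdk-go-v2/aws and github.com/aws/aws-sdk-go-v2/service/sqs;
// all values are placeholders.
func sendToFifo(ctx context.Context, client *sqs.Client, queueURL string) error {
	out, err := client.SendMessage(ctx, &sqs.SendMessageInput{
		QueueUrl:               aws.String(queueURL), // FIFO queue URLs end in .fifo
		MessageBody:            aws.String(`{"orderId":"1234"}`),
		MessageGroupId:         aws.String("orders"),     // required for FIFO queues
		MessageDeduplicationId: aws.String("order-1234"), // or enable ContentBasedDeduplication on the queue
	})
	if err != nil {
		return err
	}
	fmt.Println("sent", aws.ToString(out.MessageId), "sequence", aws.ToString(out.SequenceNumber))
	return nil
}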
SequenceNumber continues to increase for a - // particular MessageGroupId . + // This parameter applies only to FIFO (first-in-first-out) queues. + // + // The large, non-consecutive number that Amazon SQS assigns to each message. + // + // The length of SequenceNumber is 128 bits. SequenceNumber continues to increase + // for a particular MessageGroupId . SequenceNumber *string // Metadata pertaining to the operation's result. @@ -226,6 +279,12 @@ func (c *Client) addOperationSendMessageMiddlewares(stack *middleware.Stack, opt if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpSendMessageValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_SendMessageBatch.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_SendMessageBatch.go index 3323bfe0911..cd32a3145dc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_SendMessageBatch.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_SendMessageBatch.go @@ -13,20 +13,32 @@ import ( // You can use SendMessageBatch to send up to 10 messages to the specified queue // by assigning either identical or different values to each message (or by not -// assigning values at all). This is a batch version of SendMessage . For a FIFO -// queue, multiple messages within a single batch are enqueued in the order they -// are sent. The result of sending each message is reported individually in the -// response. Because the batch request can result in a combination of successful -// and unsuccessful actions, you should check for batch errors even when the call -// returns an HTTP status code of 200 . The maximum allowed individual message size -// and the maximum total payload size (the sum of the individual lengths of all of -// the batched messages) are both 256 KiB (262,144 bytes). A message can include -// only XML, JSON, and unformatted text. The following Unicode characters are -// allowed: #x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to -// #x10FFFF Any characters not included in this list will be rejected. For more -// information, see the W3C specification for characters (http://www.w3.org/TR/REC-xml/#charsets) -// . If you don't specify the DelaySeconds parameter for an entry, Amazon SQS uses +// assigning values at all). This is a batch version of SendMessage. For a FIFO queue, +// multiple messages within a single batch are enqueued in the order they are sent. +// +// The result of sending each message is reported individually in the response. +// Because the batch request can result in a combination of successful and +// unsuccessful actions, you should check for batch errors even when the call +// returns an HTTP status code of 200 . +// +// The maximum allowed individual message size and the maximum total payload size +// (the sum of the individual lengths of all of the batched messages) are both 256 +// KiB (262,144 bytes). +// +// A message can include only XML, JSON, and unformatted text. The following +// Unicode characters are allowed. For more information, see the [W3C specification for characters]. +// +// #x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF +// +// Amazon SQS does not throw an exception or completely reject the message if it +// contains invalid characters. 
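// A sketch of the batch pattern described above: even when SendMessageBatch returns
// HTTP 200, inspect the Failed entries in the response. It assumes a configured
// *sqs.Client plus imports of context, fmt, github.com/aws/aws-sdk-go-v2/aws,
// github.com/aws/aws-sdk-go-v2/service/sqs and .../service/sqs/types; IDs and bodies
// are placeholders.
func sendBatch(ctx context.Context, client *sqs.Client, queueURL string, bodies []string) error {
	entries := make([]types.SendMessageBatchRequestEntry, 0, len(bodies)) // at most 10 per request
	for i, body := range bodies {
		entries = append(entries, types.SendMessageBatchRequestEntry{
			Id:          aws.String(fmt.Sprintf("msg-%d", i)), // must be unique within the batch
			MessageBody: aws.String(body),
		})
	}
	out, err := client.SendMessageBatch(ctx, &sqs.SendMessageBatchInput{
		QueueUrl: aws.String(queueURL),
		Entries:  entries,
	})
	if err != nil {
		return err
	}
	for _, f := range out.Failed {
		fmt.Printf("entry %s failed: %s (%s)\n",
			aws.ToString(f.Id), aws.ToString(f.Message), aws.ToString(f.Code))
	}
	return nil
}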
Instead, it replaces those invalid characters with +// U+FFFD before storing the message in the queue, as long as the message body +// contains at least one valid character. +// +// If you don't specify the DelaySeconds parameter for an entry, Amazon SQS uses // the default value for the queue. +// +// [W3C specification for characters]: http://www.w3.org/TR/REC-xml/#charsets func (c *Client) SendMessageBatch(ctx context.Context, params *SendMessageBatchInput, optFns ...func(*Options)) (*SendMessageBatchOutput, error) { if params == nil { params = &SendMessageBatchInput{} @@ -49,8 +61,9 @@ type SendMessageBatchInput struct { // This member is required. Entries []types.SendMessageBatchRequestEntry - // The URL of the Amazon SQS queue to which batched messages are sent. Queue URLs - // and names are case-sensitive. + // The URL of the Amazon SQS queue to which batched messages are sent. + // + // Queue URLs and names are case-sensitive. // // This member is required. QueueUrl *string @@ -58,13 +71,11 @@ type SendMessageBatchInput struct { noSmithyDocumentSerde } -// For each message in the batch, the response contains a -// SendMessageBatchResultEntry tag if the message succeeds or a -// BatchResultErrorEntry tag if the message fails. +// For each message in the batch, the response contains a SendMessageBatchResultEntry tag if the message +// succeeds or a BatchResultErrorEntrytag if the message fails. type SendMessageBatchOutput struct { - // A list of BatchResultErrorEntry items with error details about each message - // that can't be enqueued. + // A list of BatchResultErrorEntry items with error details about each message that can't be enqueued. // // This member is required. Failed []types.BatchResultErrorEntry @@ -138,6 +149,12 @@ func (c *Client) addOperationSendMessageBatchMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpSendMessageBatchValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_SetQueueAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_SetQueueAttributes.go index 7700ca729ec..0a20b061b28 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_SetQueueAttributes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_SetQueueAttributes.go @@ -10,22 +10,26 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Sets the value of one or more queue attributes. When you change a queue's -// attributes, the change can take up to 60 seconds for most of the attributes to -// propagate throughout the Amazon SQS system. Changes made to the +// Sets the value of one or more queue attributes, like a policy. When you change +// a queue's attributes, the change can take up to 60 seconds for most of the +// attributes to propagate throughout the Amazon SQS system. Changes made to the // MessageRetentionPeriod attribute can take up to 15 minutes and will impact // existing messages in the queue potentially causing them to be expired and // deleted if the MessageRetentionPeriod is reduced below the age of existing // messages. +// // - In the future, new attributes might be added. If you write code that calls // this action, we recommend that you structure your code so that it can handle new // attributes gracefully. 
+// // - Cross-account permissions don't apply to this action. For more information, -// see Grant cross-account permissions to a role and a username (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) -// in the Amazon SQS Developer Guide. +// see [Grant cross-account permissions to a role and a username]in the Amazon SQS Developer Guide. +// // - To remove the ability to change queue permissions, you must deny permission // to the AddPermission , RemovePermission , and SetQueueAttributes actions in // your IAM policy. +// +// [Grant cross-account permissions to a role and a username]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name func (c *Client) SetQueueAttributes(ctx context.Context, params *SetQueueAttributesInput, optFns ...func(*Options)) (*SetQueueAttributesOutput, error) { if params == nil { params = &SetQueueAttributesInput{} @@ -43,15 +47,19 @@ func (c *Client) SetQueueAttributes(ctx context.Context, params *SetQueueAttribu type SetQueueAttributesInput struct { - // A map of attributes to set. The following lists the names, descriptions, and - // values of the special request parameters that the SetQueueAttributes action - // uses: + // A map of attributes to set. + // + // The following lists the names, descriptions, and values of the special request + // parameters that the SetQueueAttributes action uses: + // // - DelaySeconds – The length of time, in seconds, for which the delivery of all // messages in the queue is delayed. Valid values: An integer from 0 to 900 (15 // minutes). Default: 0. + // // - MaximumMessageSize – The limit of how many bytes a message can contain // before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes (1 KiB) // up to 262,144 bytes (256 KiB). Default: 262,144 (256 KiB). + // // - MessageRetentionPeriod – The length of time, in seconds, for which Amazon // SQS retains a message. Valid values: An integer representing seconds, from 60 (1 // minute) to 1,209,600 (14 days). Default: 345,600 (4 days). When you change a @@ -61,111 +69,151 @@ type SetQueueAttributesInput struct { // existing messages in the queue potentially causing them to be expired and // deleted if the MessageRetentionPeriod is reduced below the age of existing // messages. + // // - Policy – The queue's policy. A valid Amazon Web Services policy. For more - // information about policy structure, see Overview of Amazon Web Services IAM - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/PoliciesOverview.html) - // in the Identity and Access Management User Guide. - // - ReceiveMessageWaitTimeSeconds – The length of time, in seconds, for which a - // ReceiveMessage action waits for a message to arrive. Valid values: An integer - // from 0 to 20 (seconds). Default: 0. + // information about policy structure, see [Overview of Amazon Web Services IAM Policies]in the Identity and Access Management + // User Guide. + // + // - ReceiveMessageWaitTimeSeconds – The length of time, in seconds, for which a ReceiveMessage + // action waits for a message to arrive. Valid values: An integer from 0 to 20 + // (seconds). Default: 0. + // // - VisibilityTimeout – The visibility timeout for the queue, in seconds. Valid // values: An integer from 0 to 43,200 (12 hours). Default: 30. 
For more - // information about the visibility timeout, see Visibility Timeout (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) - // in the Amazon SQS Developer Guide. - // The following attributes apply only to dead-letter queues: (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) + // information about the visibility timeout, see [Visibility Timeout]in the Amazon SQS Developer + // Guide. + // + // The following attributes apply only to [dead-letter queues:] + // // - RedrivePolicy – The string that includes the parameters for the dead-letter // queue functionality of the source queue as a JSON object. The parameters are as // follows: + // // - deadLetterTargetArn – The Amazon Resource Name (ARN) of the dead-letter // queue to which Amazon SQS moves messages after the value of maxReceiveCount is // exceeded. + // // - maxReceiveCount – The number of times a message is delivered to the source // queue before being moved to the dead-letter queue. Default: 10. When the // ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon SQS // moves the message to the dead-letter-queue. + // // - RedriveAllowPolicy – The string that includes the parameters for the // permissions for the dead-letter queue redrive permission and which source queues // can specify dead-letter queues as a JSON object. The parameters are as follows: + // // - redrivePermission – The permission type that defines which source queues can // specify the current queue as the dead-letter queue. Valid values are: + // // - allowAll – (Default) Any source queues in this Amazon Web Services account // in the same Region can specify this queue as the dead-letter queue. + // // - denyAll – No source queues can specify this queue as the dead-letter queue. + // // - byQueue – Only queues specified by the sourceQueueArns parameter can specify // this queue as the dead-letter queue. + // // - sourceQueueArns – The Amazon Resource Names (ARN)s of the source queues that // can specify this queue as the dead-letter queue and redrive messages. You can // specify this parameter only when the redrivePermission parameter is set to // byQueue . You can specify up to 10 source queue ARNs. To allow more than 10 // source queues to specify dead-letter queues, set the redrivePermission // parameter to allowAll . + // // The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the - // dead-letter queue of a standard queue must also be a standard queue. The - // following attributes apply only to server-side-encryption (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html) - // : + // dead-letter queue of a standard queue must also be a standard queue. + // + // The following attributes apply only to [server-side-encryption]: + // // - KmsMasterKeyId – The ID of an Amazon Web Services managed customer master - // key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms) - // . While the alias of the AWS-managed CMK for Amazon SQS is always - // alias/aws/sqs , the alias of a custom CMK can, for example, be alias/MyAlias - // . 
For more examples, see KeyId (https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) - // in the Key Management Service API Reference. + // key (CMK) for Amazon SQS or a custom CMK. For more information, see [Key Terms]. While + // the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs , the + // alias of a custom CMK can, for example, be alias/MyAlias . For more examples, + // see [KeyId]in the Key Management Service API Reference. + // // - KmsDataKeyReusePeriodSeconds – The length of time, in seconds, for which - // Amazon SQS can reuse a data key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#data-keys) - // to encrypt or decrypt messages before calling KMS again. An integer representing - // seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). Default: - // 300 (5 minutes). A shorter time period provides better security but results in - // more calls to KMS which might incur charges after Free Tier. For more - // information, see How Does the Data Key Reuse Period Work? (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-how-does-the-data-key-reuse-period-work) - // . + // Amazon SQS can reuse a [data key]to encrypt or decrypt messages before calling KMS + // again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 + // seconds (24 hours). Default: 300 (5 minutes). A shorter time period provides + // better security but results in more calls to KMS which might incur charges after + // Free Tier. For more information, see [How Does the Data Key Reuse Period Work?]. + // // - SqsManagedSseEnabled – Enables server-side queue encryption using SQS owned // encryption keys. Only one server-side encryption option is supported per queue - // (for example, SSE-KMS (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-configure-sse-existing-queue.html) - // or SSE-SQS (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-configure-sqs-sse-queue.html) - // ). - // The following attribute applies only to FIFO (first-in-first-out) queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html) - // : + // (for example, [SSE-KMS]or [SSE-SQS]). + // + // The following attribute applies only to [FIFO (first-in-first-out) queues]: + // // - ContentBasedDeduplication – Enables content-based deduplication. For more - // information, see Exactly-once processing (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues-exactly-once-processing.html) - // in the Amazon SQS Developer Guide. Note the following: + // information, see [Exactly-once processing]in the Amazon SQS Developer Guide. Note the following: + // // - Every message must have a unique MessageDeduplicationId . + // // - You may provide a MessageDeduplicationId explicitly. + // // - If you aren't able to provide a MessageDeduplicationId and you enable // ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to // generate the MessageDeduplicationId using the body of the message (but not the // attributes of the message). + // // - If you don't provide a MessageDeduplicationId and the queue doesn't have // ContentBasedDeduplication set, the action fails with an error. + // // - If the queue has ContentBasedDeduplication set, your MessageDeduplicationId // overrides the generated one. 
+ // // - When ContentBasedDeduplication is in effect, messages with identical content // sent within the deduplication interval are treated as duplicates and only one // copy of the message is delivered. + // // - If you send one message with ContentBasedDeduplication enabled and then // another message with a MessageDeduplicationId that is the same as the one // generated for the first MessageDeduplicationId , the two messages are treated // as duplicates and only one copy of the message is delivered. - // The following attributes apply only to high throughput for FIFO queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/high-throughput-fifo.html) - // : + // + // The following attributes apply only to [high throughput for FIFO queues]: + // // - DeduplicationScope – Specifies whether message deduplication occurs at the // message group or queue level. Valid values are messageGroup and queue . + // // - FifoThroughputLimit – Specifies whether the FIFO queue throughput quota // applies to the entire queue or per message group. Valid values are perQueue // and perMessageGroupId . The perMessageGroupId value is allowed only when the // value for DeduplicationScope is messageGroup . + // // To enable high throughput for FIFO queues, do the following: + // // - Set DeduplicationScope to messageGroup . + // // - Set FifoThroughputLimit to perMessageGroupId . + // // If you set these attributes to anything other than the values shown for // enabling high throughput, normal throughput is in effect and deduplication - // occurs as specified. For information on throughput quotas, see Quotas related - // to messages (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/quotas-messages.html) - // in the Amazon SQS Developer Guide. + // occurs as specified. + // + // For information on throughput quotas, see [Quotas related to messages] in the Amazon SQS Developer Guide. 
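For context on how the redrive attributes documented above are typically used through this vendored client, a minimal sketch follows (illustrative only, not part of the patch). The queue URL, dead-letter queue ARN, maxReceiveCount value, and the helper name configureRedrive are placeholders; credentials are assumed to come from the default config chain.

    package example

    import (
    	"context"
    	"encoding/json"

    	"github.com/aws/aws-sdk-go-v2/aws"
    	"github.com/aws/aws-sdk-go-v2/service/sqs"
    )

    // configureRedrive attaches a dead-letter queue to a source queue by setting
    // the RedrivePolicy attribute; URL, ARN, and maxReceiveCount are placeholders.
    func configureRedrive(ctx context.Context, client *sqs.Client) error {
    	// RedrivePolicy is passed as a JSON string inside the Attributes map.
    	policy, err := json.Marshal(map[string]string{
    		"deadLetterTargetArn": "arn:aws:sqs:us-east-1:111122223333:example-dlq",
    		"maxReceiveCount":     "5",
    	})
    	if err != nil {
    		return err
    	}
    	_, err = client.SetQueueAttributes(ctx, &sqs.SetQueueAttributesInput{
    		QueueUrl: aws.String("https://sqs.us-east-1.amazonaws.com/111122223333/example-queue"),
    		Attributes: map[string]string{
    			"RedrivePolicy": string(policy),
    		},
    	})
    	return err
    }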
+ // + // [SSE-KMS]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-configure-sse-existing-queue.html + // [data key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#data-keys + // [How Does the Data Key Reuse Period Work?]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-how-does-the-data-key-reuse-period-work + // [SSE-SQS]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-configure-sqs-sse-queue.html + // [high throughput for FIFO queues]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/high-throughput-fifo.html + // [Overview of Amazon Web Services IAM Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/PoliciesOverview.html + // [dead-letter queues:]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html + // [Exactly-once processing]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues-exactly-once-processing.html + // [KeyId]: https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters + // [Quotas related to messages]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/quotas-messages.html + // [Visibility Timeout]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html + // [Key Terms]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms + // [server-side-encryption]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html + // [FIFO (first-in-first-out) queues]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html // // This member is required. Attributes map[string]string - // The URL of the Amazon SQS queue whose attributes are set. Queue URLs and names - // are case-sensitive. + // The URL of the Amazon SQS queue whose attributes are set. + // + // Queue URLs and names are case-sensitive. // // This member is required. QueueUrl *string @@ -235,6 +283,12 @@ func (c *Client) addOperationSetQueueAttributesMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpSetQueueAttributesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_StartMessageMoveTask.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_StartMessageMoveTask.go index 9acd845740a..378e78cb8a5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_StartMessageMoveTask.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_StartMessageMoveTask.go @@ -12,18 +12,21 @@ import ( // Starts an asynchronous task to move messages from a specified source queue to a // specified destination queue. +// // - This action is currently limited to supporting message redrive from queues -// that are configured as dead-letter queues (DLQs) (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) -// of other Amazon SQS queues only. 
Non-SQS queue sources of dead-letter queues, -// such as Lambda or Amazon SNS topics, are currently not supported. +// that are configured as [dead-letter queues (DLQs)]of other Amazon SQS queues only. Non-SQS queue sources +// of dead-letter queues, such as Lambda or Amazon SNS topics, are currently not +// supported. +// // - In dead-letter queues redrive context, the StartMessageMoveTask the source // queue is the DLQ, while the destination queue can be the original source queue // (from which the messages were driven to the dead-letter-queue), or a custom // destination queue. -// - Currently, only standard queues support redrive. FIFO queues don't support -// redrive. +// // - Only one active message movement task is supported per queue at any given // time. +// +// [dead-letter queues (DLQs)]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html func (c *Client) StartMessageMoveTask(ctx context.Context, params *StartMessageMoveTaskInput, optFns ...func(*Options)) (*StartMessageMoveTaskOutput, error) { if params == nil { params = &StartMessageMoveTaskInput{} @@ -133,6 +136,12 @@ func (c *Client) addOperationStartMessageMoveTaskMiddlewares(stack *middleware.S if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpStartMessageMoveTaskValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_TagQueue.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_TagQueue.go index 3f07bf4b07c..8a062de5cf0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_TagQueue.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_TagQueue.go @@ -11,21 +11,28 @@ import ( ) // Add cost allocation tags to the specified Amazon SQS queue. For an overview, -// see Tagging Your Amazon SQS Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-tags.html) -// in the Amazon SQS Developer Guide. When you use queue tags, keep the following -// guidelines in mind: +// see [Tagging Your Amazon SQS Queues]in the Amazon SQS Developer Guide. +// +// When you use queue tags, keep the following guidelines in mind: +// // - Adding more than 50 tags to a queue isn't recommended. +// // - Tags don't have any semantic meaning. Amazon SQS interprets tags as // character strings. +// // - Tags are case-sensitive. +// // - A new tag with a key identical to that of an existing tag overwrites the // existing tag. // -// For a full list of tag restrictions, see Quotas related to queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-limits.html#limits-queues) -// in the Amazon SQS Developer Guide. Cross-account permissions don't apply to this -// action. For more information, see Grant cross-account permissions to a role and -// a username (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) +// For a full list of tag restrictions, see [Quotas related to queues] in the Amazon SQS Developer Guide. +// +// Cross-account permissions don't apply to this action. For more information, see [Grant cross-account permissions to a role and a username] // in the Amazon SQS Developer Guide. 
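A minimal sketch of applying the tagging guidelines above through this client (illustrative only, not part of the patch). The queue URL, tag keys/values, and the helper name tagQueue are placeholders; UntagQueue, updated in the following hunk, is symmetric and takes a list of tag keys.

    package example

    import (
    	"context"

    	"github.com/aws/aws-sdk-go-v2/aws"
    	"github.com/aws/aws-sdk-go-v2/service/sqs"
    )

    // tagQueue adds cost allocation tags to a queue; URL and tags are placeholders.
    func tagQueue(ctx context.Context, client *sqs.Client) error {
    	_, err := client.TagQueue(ctx, &sqs.TagQueueInput{
    		QueueUrl: aws.String("https://sqs.us-east-1.amazonaws.com/111122223333/example-queue"),
    		Tags: map[string]string{
    			"team":        "payments",
    			"environment": "staging",
    		},
    	})
    	return err
    }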
+// +// [Tagging Your Amazon SQS Queues]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-tags.html +// [Quotas related to queues]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-limits.html#limits-queues +// [Grant cross-account permissions to a role and a username]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name func (c *Client) TagQueue(ctx context.Context, params *TagQueueInput, optFns ...func(*Options)) (*TagQueueOutput, error) { if params == nil { params = &TagQueueInput{} @@ -118,6 +125,12 @@ func (c *Client) addOperationTagQueueMiddlewares(stack *middleware.Stack, option if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpTagQueueValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_UntagQueue.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_UntagQueue.go index 563d8ac898d..3129e81616e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_UntagQueue.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_UntagQueue.go @@ -11,11 +11,13 @@ import ( ) // Remove cost allocation tags from the specified Amazon SQS queue. For an -// overview, see Tagging Your Amazon SQS Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-tags.html) -// in the Amazon SQS Developer Guide. Cross-account permissions don't apply to this -// action. For more information, see Grant cross-account permissions to a role and -// a username (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) +// overview, see [Tagging Your Amazon SQS Queues]in the Amazon SQS Developer Guide. +// +// Cross-account permissions don't apply to this action. For more information, see [Grant cross-account permissions to a role and a username] // in the Amazon SQS Developer Guide. 
+// +// [Tagging Your Amazon SQS Queues]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-tags.html +// [Grant cross-account permissions to a role and a username]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name func (c *Client) UntagQueue(ctx context.Context, params *UntagQueueInput, optFns ...func(*Options)) (*UntagQueueOutput, error) { if params == nil { params = &UntagQueueInput{} @@ -108,6 +110,12 @@ func (c *Client) addOperationUntagQueueMiddlewares(stack *middleware.Stack, opti if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpUntagQueueValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/auth.go index 22607b3fb91..820536ccb3b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/auth.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/auth.go @@ -12,7 +12,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -func bindAuthParamsRegion(params *AuthResolverParameters, _ interface{}, options Options) { +func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { params.Region = options.Region } @@ -90,12 +90,12 @@ type AuthResolverParameters struct { Region string } -func bindAuthResolverParams(operation string, input interface{}, options Options) *AuthResolverParameters { +func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { params := &AuthResolverParameters{ Operation: operation, } - bindAuthParamsRegion(params, input, options) + bindAuthParamsRegion(ctx, params, input, options) return params } @@ -145,7 +145,7 @@ func (*resolveAuthSchemeMiddleware) ID() string { func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - params := bindAuthResolverParams(m.operation, getOperationInput(ctx), m.options) + params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) if err != nil { return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/deserializers.go index cecf9816eab..dc5eeae6aa1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/deserializers.go @@ -14,12 +14,22 @@ import ( smithyio "github.com/aws/smithy-go/io" "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/ptr" + smithytime "github.com/aws/smithy-go/time" smithyhttp "github.com/aws/smithy-go/transport/http" "io" "io/ioutil" "strings" + "time" ) +func deserializeS3Expires(v string) (*time.Time, error) { + t, err := smithytime.ParseHTTPDate(v) + if err != nil { + return nil, nil + } + return &t, nil +} + type awsAwsjson10_deserializeOpAddPermission struct { } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/doc.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/doc.go index e3bfe76da32..a6517de380a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/doc.go @@ -3,25 +3,48 @@ // Package sqs provides the API client, operations, and parameter types for Amazon // Simple Queue Service. // -// Welcome to the Amazon SQS API Reference. Amazon SQS is a reliable, -// highly-scalable hosted queue for storing messages as they travel between -// applications or microservices. Amazon SQS moves data between distributed -// application components and helps you decouple these components. For information -// on the permissions you need to use this API, see Identity and access management (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-authentication-and-access-control.html) -// in the Amazon SQS Developer Guide. You can use Amazon Web Services SDKs (http://aws.amazon.com/tools/#sdk) -// to access Amazon SQS using your favorite programming language. The SDKs perform -// tasks such as the following automatically: +// Welcome to the Amazon SQS API Reference. +// +// Amazon SQS is a reliable, highly-scalable hosted queue for storing messages as +// they travel between applications or microservices. Amazon SQS moves data between +// distributed application components and helps you decouple these components. +// +// For information on the permissions you need to use this API, see [Identity and access management] in the Amazon +// SQS Developer Guide. +// +// You can use [Amazon Web Services SDKs] to access Amazon SQS using your favorite programming language. The +// SDKs perform tasks such as the following automatically: +// // - Cryptographically sign your service requests +// // - Retry requests +// // - Handle error responses // -// Additional information -// - Amazon SQS Product Page (http://aws.amazon.com/sqs/) +// # Additional information +// +// [Amazon SQS Product Page] +// // - Amazon SQS Developer Guide -// - Making API Requests (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-making-api-requests.html) -// - Amazon SQS Message Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html#sqs-message-attributes) -// - Amazon SQS Dead-Letter Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) -// - Amazon SQS in the Command Line Interface (http://docs.aws.amazon.com/cli/latest/reference/sqs/index.html) +// +// [Making API Requests] +// +// [Amazon SQS Message Attributes] +// +// [Amazon SQS Dead-Letter Queues] +// +// [Amazon SQS in the Command Line Interface] +// // - Amazon Web Services General Reference -// - Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#sqs_region) +// +// [Regions and Endpoints] +// +// [Identity and access management]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-authentication-and-access-control.html +// [Amazon SQS Product Page]: http://aws.amazon.com/sqs/ +// [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#sqs_region +// [Amazon SQS Dead-Letter Queues]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html +// [Amazon Web Services SDKs]: http://aws.amazon.com/tools/#sdk +// [Making API Requests]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-making-api-requests.html +// 
[Amazon SQS Message Attributes]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html#sqs-message-attributes +// [Amazon SQS in the Command Line Interface]: http://docs.aws.amazon.com/cli/latest/reference/sqs/index.html package sqs diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/endpoints.go index 7c6bb020de5..64e7a3fdb9c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/endpoints.go @@ -288,6 +288,17 @@ func (p EndpointParameters) WithDefaults() EndpointParameters { return p } +type stringSlice []string + +func (s stringSlice) Get(i int) *string { + if i < 0 || i >= len(s) { + return nil + } + + v := s[i] + return &v +} + // EndpointResolverV2 provides the interface for resolving service endpoints. type EndpointResolverV2 interface { // ResolveEndpoint attempts to resolve the endpoint with the provided options, @@ -465,7 +476,7 @@ type endpointParamsBinder interface { bindEndpointParams(*EndpointParameters) } -func bindEndpointParams(input interface{}, options Options) *EndpointParameters { +func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { params := &EndpointParameters{} params.Region = bindRegion(options.Region) @@ -495,6 +506,10 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return next.HandleFinalize(ctx, in) } + if err := checkAccountID(getIdentity(ctx), m.options.AccountIDEndpointMode); err != nil { + return out, metadata, fmt.Errorf("invalid accountID set: %w", err) + } + req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) @@ -504,7 +519,7 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") } - params := bindEndpointParams(getOperationInput(ctx), m.options) + params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/go_module_metadata.go index f071426de7a..570036dd993 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/go_module_metadata.go @@ -3,4 +3,4 @@ package sqs // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.31.4" +const goModuleVersion = "1.34.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/options.go index ce3b7c08a4f..92bc97d7253 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/options.go @@ -24,6 +24,9 @@ type Options struct { // modify this list for per operation behavior. APIOptions []func(*middleware.Stack) error + // Indicates how aws account ID is applied in endpoint2.0 routing + AccountIDEndpointMode aws.AccountIDEndpointMode + // The optional application specific identifier appended to the User-Agent header. 
AppID string @@ -54,8 +57,10 @@ type Options struct { // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a // value for this field will likely prevent you from using any endpoint-related // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom - // endpoint, set the client option BaseEndpoint instead. + // BaseEndpoint. + // + // To migrate an EndpointResolver implementation that uses a custom endpoint, set + // the client option BaseEndpoint instead. EndpointResolver EndpointResolver // Resolves the endpoint used for a particular service operation. This should be @@ -74,17 +79,20 @@ type Options struct { // RetryMaxAttempts specifies the maximum number attempts an API client will call // an operation that fails with a retryable error. A value of 0 is ignored, and // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. If specified in an operation call's - // functional options with a value that is different than the constructed client's - // Options, the Client's Retryer will be wrapped to use the operation's specific - // RetryMaxAttempts value. + // per operation call's retry max attempts. + // + // If specified in an operation call's functional options with a value that is + // different than the constructed client's Options, the Client's Retryer will be + // wrapped to use the operation's specific RetryMaxAttempts value. RetryMaxAttempts int // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. Currently does not support per operation call - // overrides, may in the future. + // Retryer option is not also specified. + // + // When creating a new API Clients this member will only be used if the Retryer + // Options member is nil. This value will be ignored if Retryer is not nil. + // + // Currently does not support per operation call overrides, may in the future. RetryMode aws.RetryMode // Retryer guides how HTTP requests should be retried in case of recoverable @@ -101,8 +109,9 @@ type Options struct { // The initial DefaultsMode used when the client options were constructed. If the // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. Currently does not support per operation call - // overrides, may in the future. + // value was at that point in time. + // + // Currently does not support per operation call overrides, may in the future. resolvedDefaultsMode aws.DefaultsMode // The HTTP client to invoke API calls with. Defaults to client's default HTTP @@ -147,6 +156,7 @@ func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { // Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for // this field will likely prevent you from using any endpoint-related service // features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// // To migrate an EndpointResolver implementation that uses a custom endpoint, set // the client option BaseEndpoint instead. 
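A minimal sketch of the migration path described in the comment above: configuring a custom endpoint via BaseEndpoint instead of the deprecated EndpointResolver, and setting the AccountIDEndpointMode option this update introduces (illustrative only, not part of the patch). The localhost endpoint and the helper name newClient are placeholders.

    package example

    import (
    	"context"

    	"github.com/aws/aws-sdk-go-v2/aws"
    	"github.com/aws/aws-sdk-go-v2/config"
    	"github.com/aws/aws-sdk-go-v2/service/sqs"
    )

    // newClient builds an SQS client pointed at a custom endpoint and with
    // account-ID-based endpoint routing disabled; the endpoint is a placeholder.
    func newClient(ctx context.Context) (*sqs.Client, error) {
    	cfg, err := config.LoadDefaultConfig(ctx)
    	if err != nil {
    		return nil, err
    	}
    	return sqs.NewFromConfig(cfg, func(o *sqs.Options) {
    		o.BaseEndpoint = aws.String("http://localhost:4566")
    		o.AccountIDEndpointMode = aws.AccountIDEndpointModeDisabled
    	}), nil
    }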
func WithEndpointResolver(v EndpointResolver) func(*Options) { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/serializers.go index ddac08da6a5..d93605345ee 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/serializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/serializers.go @@ -1466,6 +1466,17 @@ func awsAwsjson10_serializeDocumentMessageBodySystemAttributeMap(v map[string]ty return nil } +func awsAwsjson10_serializeDocumentMessageSystemAttributeList(v []types.MessageSystemAttributeName, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(string(v[i])) + } + return nil +} + func awsAwsjson10_serializeDocumentMessageSystemAttributeValue(v *types.MessageSystemAttributeValue, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -1907,6 +1918,13 @@ func awsAwsjson10_serializeOpDocumentReceiveMessageInput(v *ReceiveMessageInput, } } + if v.MessageSystemAttributeNames != nil { + ok := object.Key("MessageSystemAttributeNames") + if err := awsAwsjson10_serializeDocumentMessageSystemAttributeList(v.MessageSystemAttributeNames, ok); err != nil { + return err + } + } + if v.QueueUrl != nil { ok := object.Key("QueueUrl") ok.String(*v.QueueUrl) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/types/enums.go index dbdc81c1e2e..230b83627f5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/types/enums.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/types/enums.go @@ -6,6 +6,7 @@ type MessageSystemAttributeName string // Enum values for MessageSystemAttributeName const ( + MessageSystemAttributeNameAll MessageSystemAttributeName = "All" MessageSystemAttributeNameSenderId MessageSystemAttributeName = "SenderId" MessageSystemAttributeNameSentTimestamp MessageSystemAttributeName = "SentTimestamp" MessageSystemAttributeNameApproximateReceiveCount MessageSystemAttributeName = "ApproximateReceiveCount" @@ -19,9 +20,11 @@ const ( // Values returns all known values for MessageSystemAttributeName. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (MessageSystemAttributeName) Values() []MessageSystemAttributeName { return []MessageSystemAttributeName{ + "All", "SenderId", "SentTimestamp", "ApproximateReceiveCount", @@ -43,8 +46,9 @@ const ( // Values returns all known values for MessageSystemAttributeNameForSends. Note // that this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (MessageSystemAttributeNameForSends) Values() []MessageSystemAttributeNameForSends { return []MessageSystemAttributeNameForSends{ "AWSTraceHeader", @@ -80,8 +84,9 @@ const ( ) // Values returns all known values for QueueAttributeName. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
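A minimal sketch of the new MessageSystemAttributeNames request field and the All enum value whose serializer and constant are added in the hunks above (illustrative only, not part of the patch). The queue URL and the helper name receiveWithSystemAttributes are placeholders.

    package example

    import (
    	"context"
    	"fmt"

    	"github.com/aws/aws-sdk-go-v2/aws"
    	"github.com/aws/aws-sdk-go-v2/service/sqs"
    	"github.com/aws/aws-sdk-go-v2/service/sqs/types"
    )

    // receiveWithSystemAttributes requests every message system attribute by
    // passing the All value; the queue URL is a placeholder.
    func receiveWithSystemAttributes(ctx context.Context, client *sqs.Client) error {
    	out, err := client.ReceiveMessage(ctx, &sqs.ReceiveMessageInput{
    		QueueUrl:                    aws.String("https://sqs.us-east-1.amazonaws.com/111122223333/example-queue"),
    		MaxNumberOfMessages:         5,
    		MessageSystemAttributeNames: []types.MessageSystemAttributeName{types.MessageSystemAttributeNameAll},
    	})
    	if err != nil {
    		return err
    	}
    	for _, m := range out.Messages {
    		fmt.Println(aws.ToString(m.MessageId), m.Attributes["SentTimestamp"])
    	}
    	return nil
    }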
func (QueueAttributeName) Values() []QueueAttributeName { return []QueueAttributeName{ "All", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/types/errors.go index 436fae9cd0f..7bbe0e5a878 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/types/errors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/types/errors.go @@ -320,7 +320,9 @@ func (e *KmsDisabled) ErrorCode() string { func (e *KmsDisabled) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // The request was rejected for one of the following reasons: +// // - The KeyUsage value of the KMS key is incompatible with the API operation. +// // - The encryption algorithm or signing algorithm specified for the operation // is incompatible with the type of key material in the KMS key (KeySpec). type KmsInvalidKeyUsage struct { @@ -646,10 +648,13 @@ func (e *ReceiptHandleIsInvalid) ErrorCode() string { func (e *ReceiptHandleIsInvalid) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // The request was denied due to request throttling. +// // - The rate of requests per second exceeds the Amazon Web Services KMS request // quota for an account and Region. +// // - A burst or sustained high rate of requests to change the state of the same // KMS key. This condition is often known as a "hot key." +// // - Requests for operations on KMS keys in a Amazon Web Services CloudHSM key // store might be throttled at a lower-than-expected rate when the Amazon Web // Services CloudHSM cluster associated with the Amazon Web Services CloudHSM key diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/types/types.go index 7bda19bc050..5dade2c0d71 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/types/types.go @@ -31,14 +31,15 @@ type BatchResultErrorEntry struct { noSmithyDocumentSerde } -// Encloses a receipt handle and an entry ID for each message in -// ChangeMessageVisibilityBatch . +// Encloses a receipt handle and an entry ID for each message in ChangeMessageVisibilityBatch. type ChangeMessageVisibilityBatchRequestEntry struct { - // An identifier for this particular receipt handle used to communicate the - // result. The Id s of a batch request need to be unique within a request. This - // identifier can have up to 80 characters. The following characters are accepted: - // alphanumeric characters, hyphens(-), and underscores (_). + // An identifier for this particular receipt handle used to communicate the result. + // + // The Id s of a batch request need to be unique within a request. + // + // This identifier can have up to 80 characters. The following characters are + // accepted: alphanumeric characters, hyphens(-), and underscores (_). // // This member is required. Id *string @@ -54,7 +55,7 @@ type ChangeMessageVisibilityBatchRequestEntry struct { noSmithyDocumentSerde } -// Encloses the Id of an entry in ChangeMessageVisibilityBatch . +// Encloses the Id of an entry in ChangeMessageVisibilityBatch. type ChangeMessageVisibilityBatchResultEntry struct { // Represents a message whose visibility timeout has been changed successfully. @@ -69,9 +70,12 @@ type ChangeMessageVisibilityBatchResultEntry struct { type DeleteMessageBatchRequestEntry struct { // The identifier for this particular receipt handle. This is used to communicate - // the result. 
The Id s of a batch request need to be unique within a request. This - // identifier can have up to 80 characters. The following characters are accepted: - // alphanumeric characters, hyphens(-), and underscores (_). + // the result. + // + // The Id s of a batch request need to be unique within a request. + // + // This identifier can have up to 80 characters. The following characters are + // accepted: alphanumeric characters, hyphens(-), and underscores (_). // // This member is required. Id *string @@ -84,7 +88,7 @@ type DeleteMessageBatchRequestEntry struct { noSmithyDocumentSerde } -// Encloses the Id of an entry in DeleteMessageBatch . +// Encloses the Id of an entry in DeleteMessageBatch. type DeleteMessageBatchResultEntry struct { // Represents a successfully deleted message. @@ -102,7 +106,8 @@ type ListMessageMoveTasksResultEntry struct { ApproximateNumberOfMessagesMoved int64 // The number of messages to be moved from the source queue. This number is - // obtained at the time of starting the message movement task. + // obtained at the time of starting the message movement task and is only included + // after the message movement task is selected to start. ApproximateNumberOfMessagesToMove *int64 // The ARN of the destination queue if it has been specified in the @@ -140,18 +145,27 @@ type ListMessageMoveTasksResultEntry struct { // An Amazon SQS message. type Message struct { - // A map of the attributes requested in ReceiveMessage to their respective values. - // Supported attributes: + // A map of the attributes requested in ReceiveMessage to their respective values. Supported + // attributes: + // // - ApproximateReceiveCount + // // - ApproximateFirstReceiveTimestamp + // // - MessageDeduplicationId + // // - MessageGroupId + // // - SenderId + // // - SentTimestamp + // // - SequenceNumber + // // ApproximateFirstReceiveTimestamp and SentTimestamp are each returned as an - // integer representing the epoch time (http://en.wikipedia.org/wiki/Unix_time) in - // milliseconds. + // integer representing the [epoch time]in milliseconds. + // + // [epoch time]: http://en.wikipedia.org/wiki/Unix_time Attributes map[string]string // The message's contents (not URL-encoded). @@ -163,12 +177,15 @@ type Message struct { // An MD5 digest of the non-URL-encoded message attribute string. You can use this // attribute to verify that Amazon SQS received the message correctly. Amazon SQS // URL-decodes the message before creating the MD5 digest. For information about - // MD5, see RFC1321 (https://www.ietf.org/rfc/rfc1321.txt) . + // MD5, see [RFC1321]. + // + // [RFC1321]: https://www.ietf.org/rfc/rfc1321.txt MD5OfMessageAttributes *string // Each message attribute consists of a Name , Type , and Value . For more - // information, see Amazon SQS message attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html#sqs-message-attributes) - // in the Amazon SQS Developer Guide. + // information, see [Amazon SQS message attributes]in the Amazon SQS Developer Guide. + // + // [Amazon SQS message attributes]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html#sqs-message-attributes MessageAttributes map[string]MessageAttributeValue // A unique identifier for the message. A MessageId is considered unique across all @@ -185,16 +202,20 @@ type Message struct { // The user-specified message attribute value. 
For string data types, the Value // attribute has the same restrictions on the content as the message body. For more -// information, see SendMessage . Name , type , value and the message body must -// not be empty or null. All parts of the message attribute, including Name , Type -// , and Value , are part of the message size restriction (256 KiB or 262,144 -// bytes). +// information, see SendMessage. +// +// Name , type , value and the message body must not be empty or null. All parts +// of the message attribute, including Name , Type , and Value , are part of the +// message size restriction (256 KiB or 262,144 bytes). type MessageAttributeValue struct { // Amazon SQS supports the following logical data types: String , Number , and - // Binary . For the Number data type, you must use StringValue . You can also - // append custom labels. For more information, see Amazon SQS Message Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html#sqs-message-attributes) - // in the Amazon SQS Developer Guide. + // Binary . For the Number data type, you must use StringValue . + // + // You can also append custom labels. For more information, see [Amazon SQS Message Attributes] in the Amazon SQS + // Developer Guide. + // + // [Amazon SQS Message Attributes]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html#sqs-message-attributes // // This member is required. DataType *string @@ -209,9 +230,9 @@ type MessageAttributeValue struct { // Not implemented. Reserved for future use. StringListValues []string - // Strings are Unicode with UTF-8 binary encoding. For a list of code values, see - // ASCII Printable Characters (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters) - // . + // Strings are Unicode with UTF-8 binary encoding. For a list of code values, see [ASCII Printable Characters]. + // + // [ASCII Printable Characters]: http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters StringValue *string noSmithyDocumentSerde @@ -219,14 +240,18 @@ type MessageAttributeValue struct { // The user-specified message system attribute value. For string data types, the // Value attribute has the same restrictions on the content as the message body. -// For more information, see SendMessage . Name , type , value and the message -// body must not be empty or null. +// For more information, see SendMessage. +// +// Name , type , value and the message body must not be empty or null. type MessageSystemAttributeValue struct { // Amazon SQS supports the following logical data types: String , Number , and - // Binary . For the Number data type, you must use StringValue . You can also - // append custom labels. For more information, see Amazon SQS Message Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html#sqs-message-attributes) - // in the Amazon SQS Developer Guide. + // Binary . For the Number data type, you must use StringValue . + // + // You can also append custom labels. For more information, see [Amazon SQS Message Attributes] in the Amazon SQS + // Developer Guide. + // + // [Amazon SQS Message Attributes]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html#sqs-message-attributes // // This member is required. DataType *string @@ -241,9 +266,9 @@ type MessageSystemAttributeValue struct { // Not implemented. Reserved for future use. 
StringListValues []string - // Strings are Unicode with UTF-8 binary encoding. For a list of code values, see - // ASCII Printable Characters (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters) - // . + // Strings are Unicode with UTF-8 binary encoding. For a list of code values, see [ASCII Printable Characters]. + // + // [ASCII Printable Characters]: http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters StringValue *string noSmithyDocumentSerde @@ -252,10 +277,12 @@ type MessageSystemAttributeValue struct { // Contains the details of a single Amazon SQS message along with an Id . type SendMessageBatchRequestEntry struct { - // An identifier for a message in this batch used to communicate the result. The Id - // s of a batch request need to be unique within a request. This identifier can - // have up to 80 characters. The following characters are accepted: alphanumeric - // characters, hyphens(-), and underscores (_). + // An identifier for a message in this batch used to communicate the result. + // + // The Id s of a batch request need to be unique within a request. + // + // This identifier can have up to 80 characters. The following characters are + // accepted: alphanumeric characters, hyphens(-), and underscores (_). // // This member is required. Id *string @@ -268,78 +295,107 @@ type SendMessageBatchRequestEntry struct { // The length of time, in seconds, for which a specific message is delayed. Valid // values: 0 to 900. Maximum: 15 minutes. Messages with a positive DelaySeconds // value become available for processing after the delay period is finished. If you - // don't specify a value, the default value for the queue is applied. When you set - // FifoQueue , you can't set DelaySeconds per message. You can set this parameter - // only on a queue level. + // don't specify a value, the default value for the queue is applied. + // + // When you set FifoQueue , you can't set DelaySeconds per message. You can set + // this parameter only on a queue level. DelaySeconds int32 // Each message attribute consists of a Name , Type , and Value . For more - // information, see Amazon SQS message attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html#sqs-message-attributes) - // in the Amazon SQS Developer Guide. + // information, see [Amazon SQS message attributes]in the Amazon SQS Developer Guide. + // + // [Amazon SQS message attributes]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html#sqs-message-attributes MessageAttributes map[string]MessageAttributeValue - // This parameter applies only to FIFO (first-in-first-out) queues. The token used - // for deduplication of messages within a 5-minute minimum deduplication interval. - // If a message with a particular MessageDeduplicationId is sent successfully, - // subsequent messages with the same MessageDeduplicationId are accepted - // successfully but aren't delivered. For more information, see Exactly-once - // processing (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues-exactly-once-processing.html) - // in the Amazon SQS Developer Guide. + // This parameter applies only to FIFO (first-in-first-out) queues. + // + // The token used for deduplication of messages within a 5-minute minimum + // deduplication interval. 
If a message with a particular MessageDeduplicationId + // is sent successfully, subsequent messages with the same MessageDeduplicationId + // are accepted successfully but aren't delivered. For more information, see [Exactly-once processing]in + // the Amazon SQS Developer Guide. + // // - Every message must have a unique MessageDeduplicationId , + // // - You may provide a MessageDeduplicationId explicitly. + // // - If you aren't able to provide a MessageDeduplicationId and you enable // ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to // generate the MessageDeduplicationId using the body of the message (but not the // attributes of the message). + // // - If you don't provide a MessageDeduplicationId and the queue doesn't have // ContentBasedDeduplication set, the action fails with an error. + // // - If the queue has ContentBasedDeduplication set, your MessageDeduplicationId // overrides the generated one. + // // - When ContentBasedDeduplication is in effect, messages with identical content // sent within the deduplication interval are treated as duplicates and only one // copy of the message is delivered. + // // - If you send one message with ContentBasedDeduplication enabled and then // another message with a MessageDeduplicationId that is the same as the one // generated for the first MessageDeduplicationId , the two messages are treated // as duplicates and only one copy of the message is delivered. + // // The MessageDeduplicationId is available to the consumer of the message (this - // can be useful for troubleshooting delivery issues). If a message is sent - // successfully but the acknowledgement is lost and the message is resent with the - // same MessageDeduplicationId after the deduplication interval, Amazon SQS can't - // detect duplicate messages. Amazon SQS continues to keep track of the message - // deduplication ID even after the message is received and deleted. The length of - // MessageDeduplicationId is 128 characters. MessageDeduplicationId can contain - // alphanumeric characters ( a-z , A-Z , 0-9 ) and punctuation ( - // !"#$%&'()*+,-./:;<=>?@[\]^_`{|}~ ). For best practices of using - // MessageDeduplicationId , see Using the MessageDeduplicationId Property (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagededuplicationid-property.html) - // in the Amazon SQS Developer Guide. + // can be useful for troubleshooting delivery issues). + // + // If a message is sent successfully but the acknowledgement is lost and the + // message is resent with the same MessageDeduplicationId after the deduplication + // interval, Amazon SQS can't detect duplicate messages. + // + // Amazon SQS continues to keep track of the message deduplication ID even after + // the message is received and deleted. + // + // The length of MessageDeduplicationId is 128 characters. MessageDeduplicationId + // can contain alphanumeric characters ( a-z , A-Z , 0-9 ) and punctuation ( + // !"#$%&'()*+,-./:;<=>?@[\]^_`{|}~ ). + // + // For best practices of using MessageDeduplicationId , see [Using the MessageDeduplicationId Property] in the Amazon SQS + // Developer Guide. 
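A minimal sketch of sending to a FIFO queue with an explicit MessageDeduplicationId and MessageGroupId as described above (illustrative only, not part of the patch). The queue URL, message body, IDs, and the helper name sendToFifoQueue are placeholders.

    package example

    import (
    	"context"

    	"github.com/aws/aws-sdk-go-v2/aws"
    	"github.com/aws/aws-sdk-go-v2/service/sqs"
    )

    // sendToFifoQueue publishes one message to a FIFO queue with explicit
    // deduplication and grouping; all literal values are placeholders.
    func sendToFifoQueue(ctx context.Context, client *sqs.Client) error {
    	_, err := client.SendMessage(ctx, &sqs.SendMessageInput{
    		QueueUrl:               aws.String("https://sqs.us-east-1.amazonaws.com/111122223333/example-queue.fifo"),
    		MessageBody:            aws.String(`{"orderId":"12345"}`),
    		MessageGroupId:         aws.String("order-12345"),
    		MessageDeduplicationId: aws.String("order-12345-created"),
    	})
    	return err
    }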
+ // + // [Using the MessageDeduplicationId Property]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagededuplicationid-property.html + // [Exactly-once processing]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues-exactly-once-processing.html MessageDeduplicationId *string - // This parameter applies only to FIFO (first-in-first-out) queues. The tag that - // specifies that a message belongs to a specific message group. Messages that - // belong to the same message group are processed in a FIFO manner (however, - // messages in different message groups might be processed out of order). To - // interleave multiple ordered streams within a single queue, use MessageGroupId + // This parameter applies only to FIFO (first-in-first-out) queues. + // + // The tag that specifies that a message belongs to a specific message group. + // Messages that belong to the same message group are processed in a FIFO manner + // (however, messages in different message groups might be processed out of order). + // To interleave multiple ordered streams within a single queue, use MessageGroupId // values (for example, session data for multiple users). In this scenario, // multiple consumers can process the queue, but the session data of each user is // processed in a FIFO fashion. + // // - You must associate a non-empty MessageGroupId with a message. If you don't // provide a MessageGroupId , the action fails. + // // - ReceiveMessage might return messages with multiple MessageGroupId values. // For each MessageGroupId , the messages are sorted by time sent. The caller // can't specify a MessageGroupId . + // // The length of MessageGroupId is 128 characters. Valid values: alphanumeric - // characters and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~) . For best - // practices of using MessageGroupId , see Using the MessageGroupId Property (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagegroupid-property.html) - // in the Amazon SQS Developer Guide. MessageGroupId is required for FIFO queues. - // You can't use it for Standard queues. + // characters and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~) . + // + // For best practices of using MessageGroupId , see [Using the MessageGroupId Property] in the Amazon SQS Developer + // Guide. + // + // MessageGroupId is required for FIFO queues. You can't use it for Standard + // queues. + // + // [Using the MessageGroupId Property]: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagegroupid-property.html MessageGroupId *string // The message system attribute to send Each message system attribute consists of // a Name , Type , and Value . + // // - Currently, the only supported message system attribute is AWSTraceHeader . // Its type must be String and its value must be a correctly formatted X-Ray // trace header string. + // // - The size of a message system attribute doesn't count towards the total size // of a message. MessageSystemAttributes map[string]MessageSystemAttributeValue @@ -347,7 +403,7 @@ type SendMessageBatchRequestEntry struct { noSmithyDocumentSerde } -// Encloses a MessageId for a successfully-enqueued message in a SendMessageBatch . +// Encloses a MessageId for a successfully-enqueued message in a SendMessageBatch. type SendMessageBatchResultEntry struct { // An identifier for the message in this batch. 
@@ -358,7 +414,9 @@ type SendMessageBatchResultEntry struct { // An MD5 digest of the non-URL-encoded message body string. You can use this // attribute to verify that Amazon SQS received the message correctly. Amazon SQS // URL-decodes the message before creating the MD5 digest. For information about - // MD5, see RFC1321 (https://www.ietf.org/rfc/rfc1321.txt) . + // MD5, see [RFC1321]. + // + // [RFC1321]: https://www.ietf.org/rfc/rfc1321.txt // // This member is required. MD5OfMessageBody *string @@ -371,19 +429,25 @@ type SendMessageBatchResultEntry struct { // An MD5 digest of the non-URL-encoded message attribute string. You can use this // attribute to verify that Amazon SQS received the message correctly. Amazon SQS // URL-decodes the message before creating the MD5 digest. For information about - // MD5, see RFC1321 (https://www.ietf.org/rfc/rfc1321.txt) . + // MD5, see [RFC1321]. + // + // [RFC1321]: https://www.ietf.org/rfc/rfc1321.txt MD5OfMessageAttributes *string // An MD5 digest of the non-URL-encoded message system attribute string. You can // use this attribute to verify that Amazon SQS received the message correctly. // Amazon SQS URL-decodes the message before creating the MD5 digest. For - // information about MD5, see RFC1321 (https://www.ietf.org/rfc/rfc1321.txt) . + // information about MD5, see [RFC1321]. + // + // [RFC1321]: https://www.ietf.org/rfc/rfc1321.txt MD5OfMessageSystemAttributes *string - // This parameter applies only to FIFO (first-in-first-out) queues. The large, - // non-consecutive number that Amazon SQS assigns to each message. The length of - // SequenceNumber is 128 bits. As SequenceNumber continues to increase for a - // particular MessageGroupId . + // This parameter applies only to FIFO (first-in-first-out) queues. + // + // The large, non-consecutive number that Amazon SQS assigns to each message. + // + // The length of SequenceNumber is 128 bits. As SequenceNumber continues to + // increase for a particular MessageGroupId . SequenceNumber *string noSmithyDocumentSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md index 4a3e25ac1f1..03f870f4f8b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md @@ -1,3 +1,61 @@ +# v1.22.4 (2024-07-18) + +* No change notes available for this release. + +# v1.22.3 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.2 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.1 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.0 (2024-06-26) + +* **Feature**: Support list-of-string endpoint parameter. + +# v1.21.1 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.21.0 (2024-06-18) + +* **Feature**: Track usage of various AWS SDK features in user-agent string. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.12 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.11 (2024-06-07) + +* **Bug Fix**: Add clock skew correction on all service clients +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.10 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.9 (2024-05-23) + +* No change notes available for this release. 
+ +# v1.20.8 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.7 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.6 (2024-05-08) + +* **Bug Fix**: GoDoc improvement + # v1.20.5 (2024-04-05) * No change notes available for this release. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go index fff457735be..a06c6e738fc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go @@ -14,13 +14,16 @@ import ( internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" smithydocument "github.com/aws/smithy-go/document" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" "net" "net/http" + "sync/atomic" "time" ) @@ -30,6 +33,9 @@ const ServiceAPIVersion = "2019-06-10" // Client provides the API client to make operations call for AWS Single Sign-On. type Client struct { options Options + + // Difference between the time reported by the server and the client + timeOffset *atomic.Int64 } // New returns an initialized Client based on the functional options. Provide @@ -68,6 +74,8 @@ func New(options Options, optFns ...func(*Options)) *Client { options: options, } + initializeTimeOffsetResolver(client) + return client } @@ -229,15 +237,16 @@ func setResolvedDefaultsMode(o *Options) { // NewFromConfig returns a new client from the provided config. 
func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + AccountIDEndpointMode: cfg.AccountIDEndpointMode, } resolveAWSRetryerProvider(cfg, &opts) resolveAWSRetryMaxAttempts(cfg, &opts) @@ -441,6 +450,30 @@ func addContentSHA256Header(stack *middleware.Stack) error { return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) } +func addIsWaiterUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) + return nil + }) +} + +func addIsPaginatorUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) + return nil + }) +} + func addRetry(stack *middleware.Stack, o Options) error { attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { m.LogAttempts = o.ClientLogMode.IsRetries() @@ -484,6 +517,63 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { return nil } +func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { + if mode == aws.AccountIDEndpointModeDisabled { + return nil + } + + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { + return aws.String(ca.Credentials.AccountID) + } + + return nil +} + +func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { + mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} + if err := stack.Build.Add(&mw, middleware.After); err != nil { + return err + } + return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) +} +func initializeTimeOffsetResolver(c *Client) { + c.timeOffset = new(atomic.Int64) +} + +func checkAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) error { + switch mode { + case aws.AccountIDEndpointModeUnset: + case aws.AccountIDEndpointModePreferred: + case aws.AccountIDEndpointModeDisabled: + case aws.AccountIDEndpointModeRequired: + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); !ok { + return fmt.Errorf("accountID is required but not set") + } else if ca.Credentials.AccountID == "" { + return fmt.Errorf("accountID is required but not set") + } + // default check in case invalid mode is configured through request config + default: + return fmt.Errorf("invalid accountID endpoint mode %s, must be preferred/required/disabled", mode) + } + + return nil +} + +func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + switch options.Retryer.(type) { + case *retry.Standard: + 
ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) + case *retry.AdaptiveMode: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) + } + return nil +} + func addRecursionDetection(stack *middleware.Stack) error { return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go index 4b21e8b00a9..5ce00b4961b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go @@ -30,9 +30,10 @@ func (c *Client) GetRoleCredentials(ctx context.Context, params *GetRoleCredenti type GetRoleCredentialsInput struct { - // The token issued by the CreateToken API call. For more information, see - // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the IAM Identity Center OIDC API Reference Guide. + // The token issued by the CreateToken API call. For more information, see [CreateToken] in the + // IAM Identity Center OIDC API Reference Guide. + // + // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html // // This member is required. AccessToken *string @@ -113,6 +114,12 @@ func (c *Client) addOperationGetRoleCredentialsMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpGetRoleCredentialsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go index e44da697c55..f20e3acbfc9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go @@ -29,9 +29,10 @@ func (c *Client) ListAccountRoles(ctx context.Context, params *ListAccountRolesI type ListAccountRolesInput struct { - // The token issued by the CreateToken API call. For more information, see - // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the IAM Identity Center OIDC API Reference Guide. + // The token issued by the CreateToken API call. For more information, see [CreateToken] in the + // IAM Identity Center OIDC API Reference Guide. + // + // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html // // This member is required. AccessToken *string @@ -118,6 +119,12 @@ func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpListAccountRolesValidationMiddleware(stack); err != nil { return err } @@ -142,14 +149,6 @@ func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack return nil } -// ListAccountRolesAPIClient is a client that implements the ListAccountRoles -// operation. 
-type ListAccountRolesAPIClient interface { - ListAccountRoles(context.Context, *ListAccountRolesInput, ...func(*Options)) (*ListAccountRolesOutput, error) -} - -var _ ListAccountRolesAPIClient = (*Client)(nil) - // ListAccountRolesPaginatorOptions is the paginator options for ListAccountRoles type ListAccountRolesPaginatorOptions struct { // The number of items that clients can request per page. @@ -213,6 +212,9 @@ func (p *ListAccountRolesPaginator) NextPage(ctx context.Context, optFns ...func } params.MaxResults = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListAccountRoles(ctx, &params, optFns...) if err != nil { return nil, err @@ -232,6 +234,14 @@ func (p *ListAccountRolesPaginator) NextPage(ctx context.Context, optFns ...func return result, nil } +// ListAccountRolesAPIClient is a client that implements the ListAccountRoles +// operation. +type ListAccountRolesAPIClient interface { + ListAccountRoles(context.Context, *ListAccountRolesInput, ...func(*Options)) (*ListAccountRolesOutput, error) +} + +var _ ListAccountRolesAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListAccountRoles(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go index 2d7add067fa..391b567db95 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go @@ -12,9 +12,10 @@ import ( ) // Lists all AWS accounts assigned to the user. These AWS accounts are assigned by -// the administrator of the account. For more information, see Assign User Access (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers) -// in the IAM Identity Center User Guide. This operation returns a paginated -// response. +// the administrator of the account. For more information, see [Assign User Access]in the IAM Identity +// Center User Guide. This operation returns a paginated response. +// +// [Assign User Access]: https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers func (c *Client) ListAccounts(ctx context.Context, params *ListAccountsInput, optFns ...func(*Options)) (*ListAccountsOutput, error) { if params == nil { params = &ListAccountsInput{} @@ -32,9 +33,10 @@ func (c *Client) ListAccounts(ctx context.Context, params *ListAccountsInput, op type ListAccountsInput struct { - // The token issued by the CreateToken API call. For more information, see - // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the IAM Identity Center OIDC API Reference Guide. + // The token issued by the CreateToken API call. For more information, see [CreateToken] in the + // IAM Identity Center OIDC API Reference Guide. + // + // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html // // This member is required.
AccessToken *string @@ -116,6 +118,12 @@ func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, op if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpListAccountsValidationMiddleware(stack); err != nil { return err } @@ -140,13 +148,6 @@ func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, op return nil } -// ListAccountsAPIClient is a client that implements the ListAccounts operation. -type ListAccountsAPIClient interface { - ListAccounts(context.Context, *ListAccountsInput, ...func(*Options)) (*ListAccountsOutput, error) -} - -var _ ListAccountsAPIClient = (*Client)(nil) - // ListAccountsPaginatorOptions is the paginator options for ListAccounts type ListAccountsPaginatorOptions struct { // This is the number of items clients can request per page. @@ -210,6 +211,9 @@ func (p *ListAccountsPaginator) NextPage(ctx context.Context, optFns ...func(*Op } params.MaxResults = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListAccounts(ctx, &params, optFns...) if err != nil { return nil, err @@ -229,6 +233,13 @@ func (p *ListAccountsPaginator) NextPage(ctx context.Context, optFns ...func(*Op return result, nil } +// ListAccountsAPIClient is a client that implements the ListAccounts operation. +type ListAccountsAPIClient interface { + ListAccounts(context.Context, *ListAccountsInput, ...func(*Options)) (*ListAccountsOutput, error) +} + +var _ ListAccountsAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListAccounts(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go index 3ee682d19e0..456e4a37170 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go @@ -12,16 +12,20 @@ import ( // Removes the locally stored SSO tokens from the client-side cache and sends an // API call to the IAM Identity Center service to invalidate the corresponding -// server-side IAM Identity Center sign in session. If a user uses IAM Identity -// Center to access the AWS CLI, the user’s IAM Identity Center sign in session is -// used to obtain an IAM session, as specified in the corresponding IAM Identity -// Center permission set. More specifically, IAM Identity Center assumes an IAM -// role in the target account on behalf of the user, and the corresponding -// temporary AWS credentials are returned to the client. After user logout, any -// existing IAM role sessions that were created by using IAM Identity Center -// permission sets continue based on the duration configured in the permission set. -// For more information, see User authentications (https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html) -// in the IAM Identity Center User Guide. +// server-side IAM Identity Center sign in session. +// +// If a user uses IAM Identity Center to access the AWS CLI, the user’s IAM +// Identity Center sign in session is used to obtain an IAM session, as specified +// in the corresponding IAM Identity Center permission set.
More specifically, IAM +// Identity Center assumes an IAM role in the target account on behalf of the user, +// and the corresponding temporary AWS credentials are returned to the client. +// +// After user logout, any existing IAM role sessions that were created by using +// IAM Identity Center permission sets continue based on the duration configured in +// the permission set. For more information, see [User authentications]in the IAM Identity Center User +// Guide. +// +// [User authentications]: https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html func (c *Client) Logout(ctx context.Context, params *LogoutInput, optFns ...func(*Options)) (*LogoutOutput, error) { if params == nil { params = &LogoutInput{} @@ -39,9 +43,10 @@ func (c *Client) Logout(ctx context.Context, params *LogoutInput, optFns ...func type LogoutInput struct { - // The token issued by the CreateToken API call. For more information, see - // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the IAM Identity Center OIDC API Reference Guide. + // The token issued by the CreateToken API call. For more information, see [CreateToken] in the + // IAM Identity Center OIDC API Reference Guide. + // + // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html // // This member is required. AccessToken *string @@ -108,6 +113,12 @@ func (c *Client) addOperationLogoutMiddlewares(stack *middleware.Stack, options if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpLogoutValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go index 3b28e825dd0..a93a77cd7fe 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go @@ -12,7 +12,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -func bindAuthParamsRegion(params *AuthResolverParameters, _ interface{}, options Options) { +func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { params.Region = options.Region } @@ -90,12 +90,12 @@ type AuthResolverParameters struct { Region string } -func bindAuthResolverParams(operation string, input interface{}, options Options) *AuthResolverParameters { +func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { params := &AuthResolverParameters{ Operation: operation, } - bindAuthParamsRegion(params, input, options) + bindAuthParamsRegion(ctx, params, input, options) return params } @@ -169,7 +169,7 @@ func (*resolveAuthSchemeMiddleware) ID() string { func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - params := bindAuthResolverParams(m.operation, getOperationInput(ctx), m.options) + params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) if err != nil { return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go index 8bba205f435..d6297fa6a15 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go @@ -13,12 +13,22 @@ import ( smithyio "github.com/aws/smithy-go/io" "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/ptr" + smithytime "github.com/aws/smithy-go/time" smithyhttp "github.com/aws/smithy-go/transport/http" "io" "io/ioutil" "strings" + "time" ) +func deserializeS3Expires(v string) (*time.Time, error) { + t, err := smithytime.ParseHTTPDate(v) + if err != nil { + return nil, nil + } + return &t, nil +} + type awsRestjson1_deserializeOpGetRoleCredentials struct { } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go index 59456d5dc27..7f6e429fda8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go @@ -6,16 +6,22 @@ // AWS IAM Identity Center (successor to AWS Single Sign-On) Portal is a web // service that makes it easy for you to assign user access to IAM Identity Center // resources such as the AWS access portal. Users can get AWS account applications -// and roles assigned to them and get federated into the application. Although AWS -// Single Sign-On was renamed, the sso and identitystore API namespaces will -// continue to retain their original name for backward compatibility purposes. For -// more information, see IAM Identity Center rename (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed) -// . This reference guide describes the IAM Identity Center Portal operations that +// and roles assigned to them and get federated into the application. +// +// Although AWS Single Sign-On was renamed, the sso and identitystore API +// namespaces will continue to retain their original name for backward +// compatibility purposes. For more information, see [IAM Identity Center rename]. +// +// This reference guide describes the IAM Identity Center Portal operations that // you can call programatically and includes detailed information on data types and -// errors. AWS provides SDKs that consist of libraries and sample code for various +// errors. +// +// AWS provides SDKs that consist of libraries and sample code for various // programming languages and platforms, such as Java, Ruby, .Net, iOS, or Android. // The SDKs provide a convenient way to create programmatic access to IAM Identity // Center and other AWS services. For more information about the AWS SDKs, -// including how to download and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/) -// . +// including how to download and install them, see [Tools for Amazon Web Services]. 
+// +// [Tools for Amazon Web Services]: http://aws.amazon.com/tools/ +// [IAM Identity Center rename]: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed package sso diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go index 76521eec0e5..75ae283ef86 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go @@ -288,6 +288,17 @@ func (p EndpointParameters) WithDefaults() EndpointParameters { return p } +type stringSlice []string + +func (s stringSlice) Get(i int) *string { + if i < 0 || i >= len(s) { + return nil + } + + v := s[i] + return &v +} + // EndpointResolverV2 provides the interface for resolving service endpoints. type EndpointResolverV2 interface { // ResolveEndpoint attempts to resolve the endpoint with the provided options, @@ -465,7 +476,7 @@ type endpointParamsBinder interface { bindEndpointParams(*EndpointParameters) } -func bindEndpointParams(input interface{}, options Options) *EndpointParameters { +func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { params := &EndpointParameters{} params.Region = bindRegion(options.Region) @@ -495,6 +506,10 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return next.HandleFinalize(ctx, in) } + if err := checkAccountID(getIdentity(ctx), m.options.AccountIDEndpointMode); err != nil { + return out, metadata, fmt.Errorf("invalid accountID set: %w", err) + } + req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) @@ -504,7 +519,7 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") } - params := bindEndpointParams(getOperationInput(ctx), m.options) + params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go index 44379817e85..f252ba39e80 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go @@ -3,4 +3,4 @@ package sso // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.20.5" +const goModuleVersion = "1.22.4" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go index 2c3a77ce306..d522129e768 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go @@ -235,6 +235,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "ca-central-1", }, }, + endpoints.EndpointKey{ + Region: "ca-west-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ca-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-west-1", + }, + }, endpoints.EndpointKey{ Region: "eu-central-1", }: endpoints.Endpoint{ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go index 5dee7e53f47..0ba182e976f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go @@ -24,6 +24,9 @@ type Options struct { // modify this list for per operation behavior. APIOptions []func(*middleware.Stack) error + // Indicates how aws account ID is applied in endpoint2.0 routing + AccountIDEndpointMode aws.AccountIDEndpointMode + // The optional application specific identifier appended to the User-Agent header. AppID string @@ -50,8 +53,10 @@ type Options struct { // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a // value for this field will likely prevent you from using any endpoint-related // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom - // endpoint, set the client option BaseEndpoint instead. + // BaseEndpoint. + // + // To migrate an EndpointResolver implementation that uses a custom endpoint, set + // the client option BaseEndpoint instead. EndpointResolver EndpointResolver // Resolves the endpoint used for a particular service operation. This should be @@ -70,17 +75,20 @@ type Options struct { // RetryMaxAttempts specifies the maximum number attempts an API client will call // an operation that fails with a retryable error. A value of 0 is ignored, and // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. If specified in an operation call's - // functional options with a value that is different than the constructed client's - // Options, the Client's Retryer will be wrapped to use the operation's specific - // RetryMaxAttempts value. + // per operation call's retry max attempts. + // + // If specified in an operation call's functional options with a value that is + // different than the constructed client's Options, the Client's Retryer will be + // wrapped to use the operation's specific RetryMaxAttempts value. RetryMaxAttempts int // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. Currently does not support per operation call - // overrides, may in the future. + // Retryer option is not also specified. + // + // When creating a new API Clients this member will only be used if the Retryer + // Options member is nil. This value will be ignored if Retryer is not nil. + // + // Currently does not support per operation call overrides, may in the future. RetryMode aws.RetryMode // Retryer guides how HTTP requests should be retried in case of recoverable @@ -97,8 +105,9 @@ type Options struct { // The initial DefaultsMode used when the client options were constructed. If the // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. Currently does not support per operation call - // overrides, may in the future. + // value was at that point in time. + // + // Currently does not support per operation call overrides, may in the future. resolvedDefaultsMode aws.DefaultsMode // The HTTP client to invoke API calls with. 
Defaults to client's default HTTP @@ -143,6 +152,7 @@ func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { // Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for // this field will likely prevent you from using any endpoint-related service // features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// // To migrate an EndpointResolver implementation that uses a custom endpoint, set // the client option BaseEndpoint instead. func WithEndpointResolver(v EndpointResolver) func(*Options) { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go index 8dc02296b11..07ac468e318 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go @@ -25,22 +25,24 @@ type AccountInfo struct { type RoleCredentials struct { // The identifier used for the temporary security credentials. For more - // information, see Using Temporary Security Credentials to Request Access to AWS - // Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) - // in the AWS IAM User Guide. + // information, see [Using Temporary Security Credentials to Request Access to AWS Resources]in the AWS IAM User Guide. + // + // [Using Temporary Security Credentials to Request Access to AWS Resources]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html AccessKeyId *string // The date on which temporary security credentials expire. Expiration int64 - // The key that is used to sign the request. For more information, see Using - // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) - // in the AWS IAM User Guide. + // The key that is used to sign the request. For more information, see [Using Temporary Security Credentials to Request Access to AWS Resources] in the AWS + // IAM User Guide. + // + // [Using Temporary Security Credentials to Request Access to AWS Resources]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html SecretAccessKey *string - // The token used for temporary credentials. For more information, see Using - // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) - // in the AWS IAM User Guide. + // The token used for temporary credentials. For more information, see [Using Temporary Security Credentials to Request Access to AWS Resources] in the AWS + // IAM User Guide. 
+ // + // [Using Temporary Security Credentials to Request Access to AWS Resources]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html SessionToken *string noSmithyDocumentSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md index 053f180bf68..adf698c8027 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md @@ -1,3 +1,65 @@ +# v1.26.4 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.3 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.2 (2024-07-03) + +* No change notes available for this release. + +# v1.26.1 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.0 (2024-06-26) + +* **Feature**: Support list-of-string endpoint parameter. + +# v1.25.1 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.0 (2024-06-18) + +* **Feature**: Track usage of various AWS SDK features in user-agent string. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.6 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.5 (2024-06-07) + +* **Bug Fix**: Add clock skew correction on all service clients +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.4 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.3 (2024-05-23) + +* No change notes available for this release. + +# v1.24.2 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.1 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.0 (2024-05-10) + +* **Feature**: Updated request parameters for PKCE support. + +# v1.23.5 (2024-05-08) + +* **Bug Fix**: GoDoc improvement + # v1.23.4 (2024-03-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go index 8dc643bb0c5..25cd1c04882 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go @@ -14,13 +14,16 @@ import ( internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" smithydocument "github.com/aws/smithy-go/document" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" "net" "net/http" + "sync/atomic" "time" ) @@ -30,6 +33,9 @@ const ServiceAPIVersion = "2019-06-10" // Client provides the API client to make operations call for AWS SSO OIDC. type Client struct { options Options + + // Difference between the time reported by the server and the client + timeOffset *atomic.Int64 } // New returns an initialized Client based on the functional options. 
Provide @@ -68,6 +74,8 @@ func New(options Options, optFns ...func(*Options)) *Client { options: options, } + initializeTimeOffsetResolver(client) + return client } @@ -229,15 +237,16 @@ func setResolvedDefaultsMode(o *Options) { // NewFromConfig returns a new client from the provided config. func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + AccountIDEndpointMode: cfg.AccountIDEndpointMode, } resolveAWSRetryerProvider(cfg, &opts) resolveAWSRetryMaxAttempts(cfg, &opts) @@ -441,6 +450,30 @@ func addContentSHA256Header(stack *middleware.Stack) error { return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) } +func addIsWaiterUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) + return nil + }) +} + +func addIsPaginatorUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) + return nil + }) +} + func addRetry(stack *middleware.Stack, o Options) error { attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { m.LogAttempts = o.ClientLogMode.IsRetries() @@ -484,6 +517,63 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { return nil } +func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { + if mode == aws.AccountIDEndpointModeDisabled { + return nil + } + + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { + return aws.String(ca.Credentials.AccountID) + } + + return nil +} + +func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { + mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} + if err := stack.Build.Add(&mw, middleware.After); err != nil { + return err + } + return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) +} +func initializeTimeOffsetResolver(c *Client) { + c.timeOffset = new(atomic.Int64) +} + +func checkAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) error { + switch mode { + case aws.AccountIDEndpointModeUnset: + case aws.AccountIDEndpointModePreferred: + case aws.AccountIDEndpointModeDisabled: + case aws.AccountIDEndpointModeRequired: + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); !ok { + return fmt.Errorf("accountID is required but not set") + } else if ca.Credentials.AccountID == "" { + return fmt.Errorf("accountID is required but not set") + } + // default check in case invalid mode is configured through request config + default: + return fmt.Errorf("invalid accountID endpoint mode %s, must be preferred/required/disabled", 
mode) + } + + return nil +} + +func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + switch options.Retryer.(type) { + case *retry.Standard: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) + case *retry.AdaptiveMode: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) + } + return nil +} + func addRecursionDetection(stack *middleware.Stack) error { return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go index 63f1eeb1312..8b829188eb2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go @@ -32,34 +32,43 @@ func (c *Client) CreateToken(ctx context.Context, params *CreateTokenInput, optF type CreateTokenInput struct { // The unique identifier string for the client or application. This value comes - // from the result of the RegisterClient API. + // from the result of the RegisterClientAPI. // // This member is required. ClientId *string // A secret string generated for the client. This value should come from the - // persisted result of the RegisterClient API. + // persisted result of the RegisterClientAPI. // // This member is required. ClientSecret *string // Supports the following OAuth grant types: Device Code and Refresh Token. // Specify either of the following values, depending on the grant type that you - // want: * Device Code - urn:ietf:params:oauth:grant-type:device_code * Refresh - // Token - refresh_token For information about how to obtain the device code, see - // the StartDeviceAuthorization topic. + // want: + // + // * Device Code - urn:ietf:params:oauth:grant-type:device_code + // + // * Refresh Token - refresh_token + // + // For information about how to obtain the device code, see the StartDeviceAuthorization topic. // // This member is required. GrantType *string // Used only when calling this API for the Authorization Code grant type. The // short-term code is used to identify this authorization request. This grant type - // is currently unsupported for the CreateToken API. + // is currently unsupported for the CreateTokenAPI. Code *string + // Used only when calling this API for the Authorization Code grant type. This + // value is generated by the client and presented to validate the original code + // challenge value the client passed at authorization time. + CodeVerifier *string + // Used only when calling this API for the Device Code grant type. This short-term // code is used to identify this authorization request. This comes from the result - // of the StartDeviceAuthorization API. + // of the StartDeviceAuthorizationAPI. DeviceCode *string // Used only when calling this API for the Authorization Code grant type. This @@ -69,16 +78,18 @@ type CreateTokenInput struct { // Used only when calling this API for the Refresh Token grant type. This token is // used to refresh short-term tokens, such as the access token, that might expire. 
+ // // For more information about the features and limitations of the current IAM // Identity Center OIDC implementation, see Considerations for Using this Guide in - // the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) - // . + // the [IAM Identity Center OIDC API Reference]. + // + // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html RefreshToken *string // The list of scopes for which authorization is requested. The access token that // is issued is limited to the scopes that are granted. If this value is not // specified, IAM Identity Center authorizes all scopes that are configured for the - // client during the call to RegisterClient . + // client during the call to RegisterClient. Scope []string noSmithyDocumentSerde @@ -86,7 +97,8 @@ type CreateTokenInput struct { type CreateTokenOutput struct { - // A bearer token to access AWS accounts and applications assigned to a user. + // A bearer token to access Amazon Web Services accounts and applications assigned + // to a user. AccessToken *string // Indicates the time in seconds when an access token will expire. @@ -94,18 +106,22 @@ type CreateTokenOutput struct { // The idToken is not implemented or supported. For more information about the // features and limitations of the current IAM Identity Center OIDC implementation, - // see Considerations for Using this Guide in the IAM Identity Center OIDC API - // Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) - // . A JSON Web Token (JWT) that identifies who is associated with the issued - // access token. + // see Considerations for Using this Guide in the [IAM Identity Center OIDC API Reference]. + // + // A JSON Web Token (JWT) that identifies who is associated with the issued access + // token. + // + // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html IdToken *string // A token that, if present, can be used to refresh a previously issued access - // token that might have expired. For more information about the features and - // limitations of the current IAM Identity Center OIDC implementation, see - // Considerations for Using this Guide in the IAM Identity Center OIDC API - // Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) - // . + // token that might have expired. + // + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide in + // the [IAM Identity Center OIDC API Reference]. + // + // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html RefreshToken *string // Used to notify the client that the returned token is an access token. 
The @@ -170,6 +186,12 @@ func (c *Client) addOperationCreateTokenMiddlewares(stack *middleware.Stack, opt if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpCreateTokenValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go index 63409538940..af04c251a2a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go @@ -12,8 +12,8 @@ import ( // Creates and returns access and refresh tokens for clients and applications that // are authenticated using IAM entities. The access token can be used to fetch -// short-term credentials for the assigned AWS accounts or to access application -// APIs using bearer authentication. +// short-term credentials for the assigned Amazon Web Services accounts or to +// access application APIs using bearer authentication. func (c *Client) CreateTokenWithIAM(ctx context.Context, params *CreateTokenWithIAMInput, optFns ...func(*Options)) (*CreateTokenWithIAMOutput, error) { if params == nil { params = &CreateTokenWithIAMInput{} @@ -39,10 +39,15 @@ type CreateTokenWithIAMInput struct { // Supports the following OAuth grant types: Authorization Code, Refresh Token, // JWT Bearer, and Token Exchange. Specify one of the following values, depending - // on the grant type that you want: * Authorization Code - authorization_code * - // Refresh Token - refresh_token * JWT Bearer - - // urn:ietf:params:oauth:grant-type:jwt-bearer * Token Exchange - - // urn:ietf:params:oauth:grant-type:token-exchange + // on the grant type that you want: + // + // * Authorization Code - authorization_code + // + // * Refresh Token - refresh_token + // + // * JWT Bearer - urn:ietf:params:oauth:grant-type:jwt-bearer + // + // * Token Exchange - urn:ietf:params:oauth:grant-type:token-exchange // // This member is required. GrantType *string @@ -59,6 +64,11 @@ type CreateTokenWithIAMInput struct { // in the Authorization Code GrantOptions for the application. Code *string + // Used only when calling this API for the Authorization Code grant type. This + // value is generated by the client and presented to validate the original code + // challenge value the client passed at authorization time. + CodeVerifier *string + // Used only when calling this API for the Authorization Code grant type. This // value specifies the location of the client or application that has registered to // receive the authorization code. @@ -66,16 +76,21 @@ type CreateTokenWithIAMInput struct { // Used only when calling this API for the Refresh Token grant type. This token is // used to refresh short-term tokens, such as the access token, that might expire. + // // For more information about the features and limitations of the current IAM // Identity Center OIDC implementation, see Considerations for Using this Guide in - // the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) - // . + // the [IAM Identity Center OIDC API Reference]. 
+ // + // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html RefreshToken *string // Used only when calling this API for the Token Exchange grant type. This value // specifies the type of token that the requester can receive. The following values - // are supported: * Access Token - urn:ietf:params:oauth:token-type:access_token * - // Refresh Token - urn:ietf:params:oauth:token-type:refresh_token + // are supported: + // + // * Access Token - urn:ietf:params:oauth:token-type:access_token + // + // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token RequestedTokenType *string // The list of scopes for which authorization is requested. The access token that @@ -94,8 +109,9 @@ type CreateTokenWithIAMInput struct { // Used only when calling this API for the Token Exchange grant type. This value // specifies the type of token that is passed as the subject of the exchange. The - // following value is supported: * Access Token - - // urn:ietf:params:oauth:token-type:access_token + // following value is supported: + // + // * Access Token - urn:ietf:params:oauth:token-type:access_token SubjectTokenType *string noSmithyDocumentSerde @@ -103,7 +119,8 @@ type CreateTokenWithIAMInput struct { type CreateTokenWithIAMOutput struct { - // A bearer token to access AWS accounts and applications assigned to a user. + // A bearer token to access Amazon Web Services accounts and applications assigned + // to a user. AccessToken *string // Indicates the time in seconds when an access token will expire. @@ -114,17 +131,21 @@ type CreateTokenWithIAMOutput struct { IdToken *string // Indicates the type of tokens that are issued by IAM Identity Center. The - // following values are supported: * Access Token - - // urn:ietf:params:oauth:token-type:access_token * Refresh Token - - // urn:ietf:params:oauth:token-type:refresh_token + // following values are supported: + // + // * Access Token - urn:ietf:params:oauth:token-type:access_token + // + // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token IssuedTokenType *string // A token that, if present, can be used to refresh a previously issued access - // token that might have expired. For more information about the features and - // limitations of the current IAM Identity Center OIDC implementation, see - // Considerations for Using this Guide in the IAM Identity Center OIDC API - // Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) - // . + // token that might have expired. + // + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide in + // the [IAM Identity Center OIDC API Reference]. + // + // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html RefreshToken *string // The list of scopes for which authorization is granted. 
The access token that is @@ -196,6 +217,12 @@ func (c *Client) addOperationCreateTokenWithIAMMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpCreateTokenWithIAMValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go index 09f016ec1ef..d8c766c989e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go @@ -41,6 +41,25 @@ type RegisterClientInput struct { // This member is required. ClientType *string + // This IAM Identity Center application ARN is used to define + // administrator-managed configuration for public client access to resources. At + // authorization, the scopes, grants, and redirect URI available to this client + // will be restricted by this application resource. + EntitledApplicationArn *string + + // The list of OAuth 2.0 grant types that are defined by the client. This list is + // used to restrict the token granting flows available to the client. + GrantTypes []string + + // The IAM Identity Center Issuer URL associated with an instance of IAM Identity + // Center. This value is needed for user access to resources through the client. + IssuerUrl *string + + // The list of redirect URI that are defined by the client. At completion of + // authorization, this list is used to restrict what locations the user agent can + // be redirected back to. + RedirectUris []string + // The list of scopes that are defined by the client. Upon authorization, this // list is used to restrict permissions when granting an access token. Scopes []string @@ -128,6 +147,12 @@ func (c *Client) addOperationRegisterClientMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpRegisterClientValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go index c568805b226..7c2b38ba902 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go @@ -30,22 +30,23 @@ func (c *Client) StartDeviceAuthorization(ctx context.Context, params *StartDevi type StartDeviceAuthorizationInput struct { // The unique identifier string for the client that is registered with IAM - // Identity Center. This value should come from the persisted result of the - // RegisterClient API operation. + // Identity Center. This value should come from the persisted result of the RegisterClientAPI + // operation. // // This member is required. ClientId *string // A secret string that is generated for the client. This value should come from - // the persisted result of the RegisterClient API operation. + // the persisted result of the RegisterClientAPI operation. 
// // This member is required. ClientSecret *string - // The URL for the Amazon Web Services access portal. For more information, see - // Using the Amazon Web Services access portal (https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html) + // The URL for the Amazon Web Services access portal. For more information, see [Using the Amazon Web Services access portal] // in the IAM Identity Center User Guide. // + // [Using the Amazon Web Services access portal]: https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html + // // This member is required. StartUrl *string @@ -136,6 +137,12 @@ func (c *Client) addOperationStartDeviceAuthorizationMiddlewares(stack *middlewa if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpStartDeviceAuthorizationValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go index 40b3becb9f2..e6058da813f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go @@ -12,7 +12,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -func bindAuthParamsRegion(params *AuthResolverParameters, _ interface{}, options Options) { +func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { params.Region = options.Region } @@ -90,12 +90,12 @@ type AuthResolverParameters struct { Region string } -func bindAuthResolverParams(operation string, input interface{}, options Options) *AuthResolverParameters { +func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { params := &AuthResolverParameters{ Operation: operation, } - bindAuthParamsRegion(params, input, options) + bindAuthParamsRegion(ctx, params, input, options) return params } @@ -163,7 +163,7 @@ func (*resolveAuthSchemeMiddleware) ID() string { func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - params := bindAuthResolverParams(m.operation, getOperationInput(ctx), m.options) + params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) if err != nil { return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go index 76a1160eceb..05e8c6b7e5f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go @@ -13,11 +13,21 @@ import ( smithyio "github.com/aws/smithy-go/io" "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/ptr" + smithytime "github.com/aws/smithy-go/time" smithyhttp "github.com/aws/smithy-go/transport/http" "io" "strings" + "time" ) +func deserializeS3Expires(v string) (*time.Time, error) { + t, err := smithytime.ParseHTTPDate(v) + if err != nil { + return nil, nil + } + return &t, nil +} + type 
awsRestjson1_deserializeOpCreateToken struct { } @@ -581,12 +591,18 @@ func awsRestjson1_deserializeOpErrorRegisterClient(response *smithyhttp.Response case strings.EqualFold("InvalidClientMetadataException", errorCode): return awsRestjson1_deserializeErrorInvalidClientMetadataException(response, errorBody) + case strings.EqualFold("InvalidRedirectUriException", errorCode): + return awsRestjson1_deserializeErrorInvalidRedirectUriException(response, errorBody) + case strings.EqualFold("InvalidRequestException", errorCode): return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) case strings.EqualFold("InvalidScopeException", errorCode): return awsRestjson1_deserializeErrorInvalidScopeException(response, errorBody) + case strings.EqualFold("UnsupportedGrantTypeException", errorCode): + return awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -1158,6 +1174,42 @@ func awsRestjson1_deserializeErrorInvalidGrantException(response *smithyhttp.Res return output } +func awsRestjson1_deserializeErrorInvalidRedirectUriException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidRedirectUriException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidRedirectUriException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + func awsRestjson1_deserializeErrorInvalidRequestException(response *smithyhttp.Response, errorBody *bytes.Reader) error { output := &types.InvalidRequestException{} var buff [1024]byte @@ -1717,6 +1769,55 @@ func awsRestjson1_deserializeDocumentInvalidGrantException(v **types.InvalidGran return nil } +func awsRestjson1_deserializeDocumentInvalidRedirectUriException(v **types.InvalidRedirectUriException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidRedirectUriException + if *v == nil { + sv = &types.InvalidRedirectUriException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsRestjson1_deserializeDocumentInvalidRequestException(v 
**types.InvalidRequestException, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go index 53cd4f55a03..1d258e5677b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go @@ -6,33 +6,41 @@ // IAM Identity Center OpenID Connect (OIDC) is a web service that enables a // client (such as CLI or a native application) to register with IAM Identity // Center. The service also enables the client to fetch the user’s access token -// upon successful authentication and authorization with IAM Identity Center. IAM -// Identity Center uses the sso and identitystore API namespaces. Considerations -// for Using This Guide Before you begin using this guide, we recommend that you -// first review the following important information about how the IAM Identity -// Center OIDC service works. +// upon successful authentication and authorization with IAM Identity Center. +// +// IAM Identity Center uses the sso and identitystore API namespaces. +// +// # Considerations for Using This Guide +// +// Before you begin using this guide, we recommend that you first review the +// following important information about how the IAM Identity Center OIDC service +// works. +// // - The IAM Identity Center OIDC service currently implements only the portions -// of the OAuth 2.0 Device Authorization Grant standard ( -// https://tools.ietf.org/html/rfc8628 (https://tools.ietf.org/html/rfc8628) ) -// that are necessary to enable single sign-on authentication with the CLI. +// of the OAuth 2.0 Device Authorization Grant standard ([https://tools.ietf.org/html/rfc8628] ) that are necessary to +// enable single sign-on authentication with the CLI. +// // - With older versions of the CLI, the service only emits OIDC access tokens, // so to obtain a new token, users must explicitly re-authenticate. To access the // OIDC flow that supports token refresh and doesn’t require re-authentication, // update to the latest CLI version (1.27.10 for CLI V1 and 2.9.0 for CLI V2) with // support for OIDC token refresh and configurable IAM Identity Center session -// durations. For more information, see Configure Amazon Web Services access -// portal session duration (https://docs.aws.amazon.com/singlesignon/latest/userguide/configure-user-session.html) -// . +// durations. For more information, see [Configure Amazon Web Services access portal session duration]. +// // - The access tokens provided by this service grant access to all Amazon Web // Services account entitlements assigned to an IAM Identity Center user, not just // a particular application. +// // - The documentation in this guide does not describe the mechanism to convert // the access token into Amazon Web Services Auth (“sigv4”) credentials for use // with IAM-protected Amazon Web Services service endpoints. For more information, -// see GetRoleCredentials (https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html) -// in the IAM Identity Center Portal API Reference Guide. +// see [GetRoleCredentials]in the IAM Identity Center Portal API Reference Guide. +// +// For general information about IAM Identity Center, see [What is IAM Identity Center?] in the IAM Identity +// Center User Guide. // -// For general information about IAM Identity Center, see What is IAM Identity -// Center? 
(https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html) -// in the IAM Identity Center User Guide. +// [Configure Amazon Web Services access portal session duration]: https://docs.aws.amazon.com/singlesignon/latest/userguide/configure-user-session.html +// [GetRoleCredentials]: https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html +// [https://tools.ietf.org/html/rfc8628]: https://tools.ietf.org/html/rfc8628 +// [What is IAM Identity Center?]: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html package ssooidc diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go index 94e835e7115..d7099721fe8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go @@ -288,6 +288,17 @@ func (p EndpointParameters) WithDefaults() EndpointParameters { return p } +type stringSlice []string + +func (s stringSlice) Get(i int) *string { + if i < 0 || i >= len(s) { + return nil + } + + v := s[i] + return &v +} + // EndpointResolverV2 provides the interface for resolving service endpoints. type EndpointResolverV2 interface { // ResolveEndpoint attempts to resolve the endpoint with the provided options, @@ -465,7 +476,7 @@ type endpointParamsBinder interface { bindEndpointParams(*EndpointParameters) } -func bindEndpointParams(input interface{}, options Options) *EndpointParameters { +func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { params := &EndpointParameters{} params.Region = bindRegion(options.Region) @@ -495,6 +506,10 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return next.HandleFinalize(ctx, in) } + if err := checkAccountID(getIdentity(ctx), m.options.AccountIDEndpointMode); err != nil { + return out, metadata, fmt.Errorf("invalid accountID set: %w", err) + } + req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) @@ -504,7 +519,7 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") } - params := bindEndpointParams(getOperationInput(ctx), m.options) + params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go index cbc7e8415f9..b71407f8dbe 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go @@ -3,4 +3,4 @@ package ssooidc // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.23.4" +const goModuleVersion = "1.26.4" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go index 843edb07428..4a29eaa20b4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go @@ -235,6 +235,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "ca-central-1", }, }, + endpoints.EndpointKey{ + Region: "ca-west-1", + }: endpoints.Endpoint{ + Hostname: "oidc.ca-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-west-1", + }, + }, endpoints.EndpointKey{ Region: "eu-central-1", }: endpoints.Endpoint{ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go index b964e7e1090..a012e4cb8d9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go @@ -24,6 +24,9 @@ type Options struct { // modify this list for per operation behavior. APIOptions []func(*middleware.Stack) error + // Indicates how aws account ID is applied in endpoint2.0 routing + AccountIDEndpointMode aws.AccountIDEndpointMode + // The optional application specific identifier appended to the User-Agent header. AppID string @@ -50,8 +53,10 @@ type Options struct { // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a // value for this field will likely prevent you from using any endpoint-related // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom - // endpoint, set the client option BaseEndpoint instead. + // BaseEndpoint. + // + // To migrate an EndpointResolver implementation that uses a custom endpoint, set + // the client option BaseEndpoint instead. EndpointResolver EndpointResolver // Resolves the endpoint used for a particular service operation. This should be @@ -70,17 +75,20 @@ type Options struct { // RetryMaxAttempts specifies the maximum number attempts an API client will call // an operation that fails with a retryable error. A value of 0 is ignored, and // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. If specified in an operation call's - // functional options with a value that is different than the constructed client's - // Options, the Client's Retryer will be wrapped to use the operation's specific - // RetryMaxAttempts value. + // per operation call's retry max attempts. + // + // If specified in an operation call's functional options with a value that is + // different than the constructed client's Options, the Client's Retryer will be + // wrapped to use the operation's specific RetryMaxAttempts value. RetryMaxAttempts int // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. Currently does not support per operation call - // overrides, may in the future. + // Retryer option is not also specified. + // + // When creating a new API Clients this member will only be used if the Retryer + // Options member is nil. This value will be ignored if Retryer is not nil. + // + // Currently does not support per operation call overrides, may in the future. RetryMode aws.RetryMode // Retryer guides how HTTP requests should be retried in case of recoverable @@ -97,8 +105,9 @@ type Options struct { // The initial DefaultsMode used when the client options were constructed. 
If the // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. Currently does not support per operation call - // overrides, may in the future. + // value was at that point in time. + // + // Currently does not support per operation call overrides, may in the future. resolvedDefaultsMode aws.DefaultsMode // The HTTP client to invoke API calls with. Defaults to client's default HTTP @@ -143,6 +152,7 @@ func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { // Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for // this field will likely prevent you from using any endpoint-related service // features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// // To migrate an EndpointResolver implementation that uses a custom endpoint, set // the client option BaseEndpoint instead. func WithEndpointResolver(v EndpointResolver) func(*Options) { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go index 754218b78e2..04411bd6167 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go @@ -95,6 +95,11 @@ func awsRestjson1_serializeOpDocumentCreateTokenInput(v *CreateTokenInput, value ok.String(*v.Code) } + if v.CodeVerifier != nil { + ok := object.Key("codeVerifier") + ok.String(*v.CodeVerifier) + } + if v.DeviceCode != nil { ok := object.Key("deviceCode") ok.String(*v.DeviceCode) @@ -207,6 +212,11 @@ func awsRestjson1_serializeOpDocumentCreateTokenWithIAMInput(v *CreateTokenWithI ok.String(*v.Code) } + if v.CodeVerifier != nil { + ok := object.Key("codeVerifier") + ok.String(*v.CodeVerifier) + } + if v.GrantType != nil { ok := object.Key("grantType") ok.String(*v.GrantType) @@ -324,6 +334,30 @@ func awsRestjson1_serializeOpDocumentRegisterClientInput(v *RegisterClientInput, ok.String(*v.ClientType) } + if v.EntitledApplicationArn != nil { + ok := object.Key("entitledApplicationArn") + ok.String(*v.EntitledApplicationArn) + } + + if v.GrantTypes != nil { + ok := object.Key("grantTypes") + if err := awsRestjson1_serializeDocumentGrantTypes(v.GrantTypes, ok); err != nil { + return err + } + } + + if v.IssuerUrl != nil { + ok := object.Key("issuerUrl") + ok.String(*v.IssuerUrl) + } + + if v.RedirectUris != nil { + ok := object.Key("redirectUris") + if err := awsRestjson1_serializeDocumentRedirectUris(v.RedirectUris, ok); err != nil { + return err + } + } + if v.Scopes != nil { ok := object.Key("scopes") if err := awsRestjson1_serializeDocumentScopes(v.Scopes, ok); err != nil { @@ -419,6 +453,28 @@ func awsRestjson1_serializeOpDocumentStartDeviceAuthorizationInput(v *StartDevic return nil } +func awsRestjson1_serializeDocumentGrantTypes(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsRestjson1_serializeDocumentRedirectUris(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + func awsRestjson1_serializeDocumentScopes(v []string, value smithyjson.Value) error { array := value.Array() defer array.Close() diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go index 86b62049fd9..2cfe7b48fed 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go @@ -188,7 +188,7 @@ func (e *InvalidClientMetadataException) ErrorCode() string { func (e *InvalidClientMetadataException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // Indicates that a request contains an invalid grant. This can occur if a client -// makes a CreateToken request with an invalid grant type. +// makes a CreateTokenrequest with an invalid grant type. type InvalidGrantException struct { Message *string @@ -217,6 +217,36 @@ func (e *InvalidGrantException) ErrorCode() string { } func (e *InvalidGrantException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } +// Indicates that one or more redirect URI in the request is not supported for +// this operation. +type InvalidRedirectUriException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InvalidRedirectUriException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidRedirectUriException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidRedirectUriException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidRedirectUriException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidRedirectUriException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + // Indicates that something is wrong with the input to the request. For example, a // required parameter might be missing or out of range. type InvalidRequestException struct { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md index 2fd5d5a649b..f3128d75250 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md @@ -1,3 +1,57 @@ +# v1.30.3 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.2 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.1 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.0 (2024-06-26) + +* **Feature**: Support list-of-string endpoint parameter. + +# v1.29.1 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.0 (2024-06-18) + +* **Feature**: Track usage of various AWS SDK features in user-agent string. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.13 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.12 (2024-06-07) + +* **Bug Fix**: Add clock skew correction on all service clients +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.11 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.10 (2024-05-23) + +* No change notes available for this release. 
+ +# v1.28.9 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.8 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.7 (2024-05-08) + +* **Bug Fix**: GoDoc improvement + # v1.28.6 (2024-03-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go index 4d18dc86bd7..acd2b8e7a13 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go @@ -15,15 +15,18 @@ import ( internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding" presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url" smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" smithydocument "github.com/aws/smithy-go/document" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" "net" "net/http" + "sync/atomic" "time" ) @@ -34,6 +37,9 @@ const ServiceAPIVersion = "2011-06-15" // Service. type Client struct { options Options + + // Difference between the time reported by the server and the client + timeOffset *atomic.Int64 } // New returns an initialized Client based on the functional options. Provide @@ -72,6 +78,8 @@ func New(options Options, optFns ...func(*Options)) *Client { options: options, } + initializeTimeOffsetResolver(client) + return client } @@ -233,15 +241,16 @@ func setResolvedDefaultsMode(o *Options) { // NewFromConfig returns a new client from the provided config. 
func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + AccountIDEndpointMode: cfg.AccountIDEndpointMode, } resolveAWSRetryerProvider(cfg, &opts) resolveAWSRetryMaxAttempts(cfg, &opts) @@ -445,6 +454,30 @@ func addContentSHA256Header(stack *middleware.Stack) error { return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) } +func addIsWaiterUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) + return nil + }) +} + +func addIsPaginatorUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) + return nil + }) +} + func addRetry(stack *middleware.Stack, o Options) error { attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { m.LogAttempts = o.ClientLogMode.IsRetries() @@ -488,6 +521,63 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { return nil } +func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { + if mode == aws.AccountIDEndpointModeDisabled { + return nil + } + + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { + return aws.String(ca.Credentials.AccountID) + } + + return nil +} + +func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { + mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} + if err := stack.Build.Add(&mw, middleware.After); err != nil { + return err + } + return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) +} +func initializeTimeOffsetResolver(c *Client) { + c.timeOffset = new(atomic.Int64) +} + +func checkAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) error { + switch mode { + case aws.AccountIDEndpointModeUnset: + case aws.AccountIDEndpointModePreferred: + case aws.AccountIDEndpointModeDisabled: + case aws.AccountIDEndpointModeRequired: + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); !ok { + return fmt.Errorf("accountID is required but not set") + } else if ca.Credentials.AccountID == "" { + return fmt.Errorf("accountID is required but not set") + } + // default check in case invalid mode is configured through request config + default: + return fmt.Errorf("invalid accountID endpoint mode %s, must be preferred/required/disabled", mode) + } + + return nil +} + +func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + switch options.Retryer.(type) { + case *retry.Standard: + 
ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) + case *retry.AdaptiveMode: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) + } + return nil +} + func addRecursionDetection(stack *middleware.Stack) error { return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go index e0e2c9c2e8d..e74fc8ba9f7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go @@ -16,69 +16,99 @@ import ( // Amazon Web Services resources. These temporary credentials consist of an access // key ID, a secret access key, and a security token. Typically, you use AssumeRole // within your account or for cross-account access. For a comparison of AssumeRole -// with other API operations that produce temporary credentials, see Requesting -// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. Permissions The temporary security credentials created by -// AssumeRole can be used to make API calls to any Amazon Web Services service -// with the following exception: You cannot call the Amazon Web Services STS -// GetFederationToken or GetSessionToken API operations. (Optional) You can pass -// inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policy Amazon -// Resource Names (ARNs) to use as managed session policies. The plaintext that you -// use for both inline and managed session policies can't exceed 2,048 characters. -// Passing policies to this operation returns new temporary credentials. The -// resulting session's permissions are the intersection of the role's -// identity-based policy and the session policies. You can use the role's temporary -// credentials in subsequent Amazon Web Services API calls to access resources in -// the account that owns the role. You cannot use session policies to grant more -// permissions than those allowed by the identity-based policy of the role that is -// being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. When you create a role, you create two policies: a role -// trust policy that specifies who can assume the role, and a permissions policy -// that specifies what can be done with the role. You specify the trusted principal -// that is allowed to assume the role in the role trust policy. To assume a role -// from a different account, your Amazon Web Services account must be trusted by -// the role. The trust relationship is defined in the role's trust policy when the -// role is created. That trust policy states which accounts are allowed to delegate -// that access to users in the account. A user who wants to access a role in a -// different account must also have permissions that are delegated from the account -// administrator. 
The administrator must attach a policy that allows the user to -// call AssumeRole for the ARN of the role in the other account. To allow a user -// to assume a role in the same account, you can do either of the following: +// with other API operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Comparing the Amazon Web Services STS API operations] in the +// IAM User Guide. +// +// # Permissions +// +// The temporary security credentials created by AssumeRole can be used to make +// API calls to any Amazon Web Services service with the following exception: You +// cannot call the Amazon Web Services STS GetFederationToken or GetSessionToken +// API operations. +// +// (Optional) You can pass inline or managed [session policies] to this operation. You can pass a +// single JSON policy document to use as an inline session policy. You can also +// specify up to 10 managed policy Amazon Resource Names (ARNs) to use as managed +// session policies. The plaintext that you use for both inline and managed session +// policies can't exceed 2,048 characters. Passing policies to this operation +// returns new temporary credentials. The resulting session's permissions are the +// intersection of the role's identity-based policy and the session policies. You +// can use the role's temporary credentials in subsequent Amazon Web Services API +// calls to access resources in the account that owns the role. You cannot use +// session policies to grant more permissions than those allowed by the +// identity-based policy of the role that is being assumed. For more information, +// see [Session Policies]in the IAM User Guide. +// +// When you create a role, you create two policies: a role trust policy that +// specifies who can assume the role, and a permissions policy that specifies what +// can be done with the role. You specify the trusted principal that is allowed to +// assume the role in the role trust policy. +// +// To assume a role from a different account, your Amazon Web Services account +// must be trusted by the role. The trust relationship is defined in the role's +// trust policy when the role is created. That trust policy states which accounts +// are allowed to delegate that access to users in the account. +// +// A user who wants to access a role in a different account must also have +// permissions that are delegated from the account administrator. The administrator +// must attach a policy that allows the user to call AssumeRole for the ARN of the +// role in the other account. +// +// To allow a user to assume a role in the same account, you can do either of the +// following: +// // - Attach a policy to the user that allows the user to call AssumeRole (as long // as the role's trust policy trusts the account). +// // - Add the user as a principal directly in the role's trust policy. // // You can do either because the role’s trust policy acts as an IAM resource-based // policy. When a resource-based policy grants access to a principal in the same // account, no additional identity-based policy is required. For more information -// about trust policies and resource-based policies, see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) -// in the IAM User Guide. Tags (Optional) You can pass tag key-value pairs to your -// session. These tags are called session tags. 
For more information about session -// tags, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. An administrator must grant you the permissions necessary -// to pass session tags. The administrator can also create granular permissions to -// allow you to pass only specific session tags. For more information, see -// Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. You can set the session tags as transitive. Transitive -// tags persist during role chaining. For more information, see Chaining Roles -// with Session Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) -// in the IAM User Guide. Using MFA with AssumeRole (Optional) You can include -// multi-factor authentication (MFA) information when you call AssumeRole . This is -// useful for cross-account scenarios to ensure that the user that assumes the role -// has been authenticated with an Amazon Web Services MFA device. In that scenario, -// the trust policy of the role being assumed includes a condition that tests for -// MFA authentication. If the caller does not include valid MFA information, the -// request to assume the role is denied. The condition in a trust policy that tests -// for MFA authentication might look like the following example. "Condition": -// {"Bool": {"aws:MultiFactorAuthPresent": true}} For more information, see -// Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) -// in the IAM User Guide guide. To use MFA with AssumeRole , you pass values for -// the SerialNumber and TokenCode parameters. The SerialNumber value identifies -// the user's hardware or virtual MFA device. The TokenCode is the time-based -// one-time password (TOTP) that the MFA device produces. +// about trust policies and resource-based policies, see [IAM Policies]in the IAM User Guide. +// +// # Tags +// +// (Optional) You can pass tag key-value pairs to your session. These tags are +// called session tags. For more information about session tags, see [Passing Session Tags in STS]in the IAM +// User Guide. +// +// An administrator must grant you the permissions necessary to pass session tags. +// The administrator can also create granular permissions to allow you to pass only +// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during role +// chaining. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide. +// +// # Using MFA with AssumeRole +// +// (Optional) You can include multi-factor authentication (MFA) information when +// you call AssumeRole . This is useful for cross-account scenarios to ensure that +// the user that assumes the role has been authenticated with an Amazon Web +// Services MFA device. In that scenario, the trust policy of the role being +// assumed includes a condition that tests for MFA authentication. If the caller +// does not include valid MFA information, the request to assume the role is +// denied. The condition in a trust policy that tests for MFA authentication might +// look like the following example. 
+// +// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}} +// +// For more information, see [Configuring MFA-Protected API Access] in the IAM User Guide guide. +// +// To use MFA with AssumeRole , you pass values for the SerialNumber and TokenCode +// parameters. The SerialNumber value identifies the user's hardware or virtual +// MFA device. The TokenCode is the time-based one-time password (TOTP) that the +// MFA device produces. +// +// [Configuring MFA-Protected API Access]: https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html +// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html +// [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining +// [Comparing the Amazon Web Services STS API operations]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison +// [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [IAM Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html +// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html +// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html func (c *Client) AssumeRole(ctx context.Context, params *AssumeRoleInput, optFns ...func(*Options)) (*AssumeRoleOutput, error) { if params == nil { params = &AssumeRoleInput{} @@ -101,17 +131,19 @@ type AssumeRoleInput struct { // This member is required. RoleArn *string - // An identifier for the assumed role session. Use the role session name to - // uniquely identify a session when the same role is assumed by different - // principals or for different reasons. In cross-account scenarios, the role - // session name is visible to, and can be logged by the account that owns the role. - // The role session name is also used in the ARN of the assumed role principal. - // This means that subsequent cross-account API requests that use the temporary - // security credentials will expose the role session name to the external account - // in their CloudTrail logs. The regex used to validate this parameter is a string - // of characters consisting of upper- and lower-case alphanumeric characters with - // no spaces. You can also include underscores or any of the following characters: - // =,.@- + // An identifier for the assumed role session. + // + // Use the role session name to uniquely identify a session when the same role is + // assumed by different principals or for different reasons. In cross-account + // scenarios, the role session name is visible to, and can be logged by the account + // that owns the role. The role session name is also used in the ARN of the assumed + // role principal. This means that subsequent cross-account API requests that use + // the temporary security credentials will expose the role session name to the + // external account in their CloudTrail logs. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@- // // This member is required. 
RoleSessionName *string @@ -122,23 +154,27 @@ type AssumeRoleInput struct { // hours. If you specify a value higher than this setting or the administrator // setting (whichever is lower), the operation fails. For example, if you specify a // session duration of 12 hours, but your administrator set the maximum session - // duration to 6 hours, your operation fails. Role chaining limits your Amazon Web - // Services CLI or Amazon Web Services API role session to a maximum of one hour. - // When you use the AssumeRole API operation to assume a role, you can specify the - // duration of your role session with the DurationSeconds parameter. You can - // specify a parameter value of up to 43200 seconds (12 hours), depending on the - // maximum session duration setting for your role. However, if you assume a role - // using role chaining and provide a DurationSeconds parameter value greater than - // one hour, the operation fails. To learn how to view the maximum value for your - // role, see View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) - // in the IAM User Guide. By default, the value is set to 3600 seconds. The - // DurationSeconds parameter is separate from the duration of a console session - // that you might request using the returned credentials. The request to the - // federation endpoint for a console sign-in token takes a SessionDuration + // duration to 6 hours, your operation fails. + // + // Role chaining limits your Amazon Web Services CLI or Amazon Web Services API + // role session to a maximum of one hour. When you use the AssumeRole API + // operation to assume a role, you can specify the duration of your role session + // with the DurationSeconds parameter. You can specify a parameter value of up to + // 43200 seconds (12 hours), depending on the maximum session duration setting for + // your role. However, if you assume a role using role chaining and provide a + // DurationSeconds parameter value greater than one hour, the operation fails. To + // learn how to view the maximum value for your role, see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request to + // the federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more - // information, see Creating a URL that Enables Federated Users to Access the - // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) - // in the IAM User Guide. + // information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]in the IAM User Guide. 
+ // + // [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session + // [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html DurationSeconds *int32 // A unique identifier that might be required when you assume a role in another @@ -149,63 +185,79 @@ type AssumeRoleInput struct { // the administrator of the trusting account might send an external ID to the // administrator of the trusted account. That way, only someone with the ID can // assume the role, rather than everyone in the account. For more information about - // the external ID, see How to Use an External ID When Granting Access to Your - // Amazon Web Services Resources to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) - // in the IAM User Guide. The regex used to validate this parameter is a string of - // characters consisting of upper- and lower-case alphanumeric characters with no - // spaces. You can also include underscores or any of the following characters: - // =,.@:/- + // the external ID, see [How to Use an External ID When Granting Access to Your Amazon Web Services Resources to a Third Party]in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@:/- + // + // [How to Use an External ID When Granting Access to Your Amazon Web Services Resources to a Third Party]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html ExternalId *string // An IAM policy in JSON format that you want to use as an inline session policy. + // // This parameter is optional. Passing policies to this operation returns new // temporary credentials. The resulting session's permissions are the intersection // of the role's identity-based policy and the session policies. You can use the // role's temporary credentials in subsequent Amazon Web Services API calls to // access resources in the account that owns the role. You cannot use session // policies to grant more permissions than those allowed by the identity-based - // policy of the role that is being assumed. For more information, see Session - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. The plaintext that you use for both inline and managed - // session policies can't exceed 2,048 characters. The JSON policy characters can - // be any ASCII character from the space character to the end of the valid - // character list (\u0020 through \u00FF). It can also include the tab (\u0009), - // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web - // Services conversion compresses the passed inline session policy, managed policy - // ARNs, and session tags into a packed binary format that has a separate limit. - // Your request can fail for this limit even if your plaintext meets the other - // requirements. The PackedPolicySize response element indicates by percentage how - // close the policies and tags for your request are to the upper size limit. + // policy of the role that is being assumed. 
For more information, see [Session Policies]in the IAM + // User Guide. + // + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session Policy *string // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to // use as managed session policies. The policies must exist in the same account as - // the role. This parameter is optional. You can provide up to 10 managed policy - // ARNs. However, the plaintext that you use for both inline and managed session - // policies can't exceed 2,048 characters. For more information about ARNs, see - // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. An Amazon Web Services conversion - // compresses the passed inline session policy, managed policy ARNs, and session - // tags into a packed binary format that has a separate limit. Your request can - // fail for this limit even if your plaintext meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. Passing policies to this - // operation returns new temporary credentials. The resulting session's permissions - // are the intersection of the role's identity-based policy and the session - // policies. You can use the role's temporary credentials in subsequent Amazon Web - // Services API calls to access resources in the account that owns the role. You - // cannot use session policies to grant more permissions than those allowed by the - // identity-based policy of the role that is being assumed. For more information, - // see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. + // the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plaintext that you use for both inline and managed session policies + // can't exceed 2,048 characters. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the + // Amazon Web Services General Reference. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. 
+ // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's + // identity-based policy and the session policies. You can use the role's temporary + // credentials in subsequent Amazon Web Services API calls to access resources in + // the account that owns the role. You cannot use session policies to grant more + // permissions than those allowed by the identity-based policy of the role that is + // being assumed. For more information, see [Session Policies]in the IAM User Guide. + // + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html PolicyArns []types.PolicyDescriptorType // A list of previously acquired trusted context assertions in the format of a // JSON array. The trusted context assertion is signed and encrypted by Amazon Web - // Services STS. The following is an example of a ProvidedContext value that - // includes a single trusted context assertion and the ARN of the context provider - // from which the trusted context assertion was generated. - // [{"ProviderArn":"arn:aws:iam::aws:contextProvider/IdentityCenter","ContextAssertion":"trusted-context-assertion"}] + // Services STS. + // + // The following is an example of a ProvidedContext value that includes a single + // trusted context assertion and the ARN of the context provider from which the + // trusted context assertion was generated. + // + // [{"ProviderArn":"arn:aws:iam::aws:contextProvider/IdentityCenter","ContextAssertion":"trusted-context-assertion"}] ProvidedContexts []types.ProvidedContext // The identification number of the MFA device that is associated with the user @@ -213,79 +265,97 @@ type AssumeRoleInput struct { // the role being assumed includes a condition that requires MFA authentication. // The value is either the serial number for a hardware device (such as // GAHT12345678 ) or an Amazon Resource Name (ARN) for a virtual device (such as - // arn:aws:iam::123456789012:mfa/user ). The regex used to validate this parameter - // is a string of characters consisting of upper- and lower-case alphanumeric - // characters with no spaces. You can also include underscores or any of the - // following characters: =,.@- + // arn:aws:iam::123456789012:mfa/user ). + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@- SerialNumber *string // The source identity specified by the principal that is calling the AssumeRole - // operation. You can require users to specify a source identity when they assume a - // role. You do this by using the sts:SourceIdentity condition key in a role trust - // policy. You can use source identity information in CloudTrail logs to determine - // who took actions with a role. You can use the aws:SourceIdentity condition key - // to further control access to Amazon Web Services resources based on the value of - // source identity. For more information about using source identity, see Monitor - // and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) - // in the IAM User Guide. 
The regex used to validate this parameter is a string of - // characters consisting of upper- and lower-case alphanumeric characters with no - // spaces. You can also include underscores or any of the following characters: - // =,.@-. You cannot use a value that begins with the text aws: . This prefix is - // reserved for Amazon Web Services internal use. + // operation. + // + // You can require users to specify a source identity when they assume a role. You + // do this by using the sts:SourceIdentity condition key in a role trust policy. + // You can use source identity information in CloudTrail logs to determine who took + // actions with a role. You can use the aws:SourceIdentity condition key to + // further control access to Amazon Web Services resources based on the value of + // source identity. For more information about using source identity, see [Monitor and control actions taken with assumed roles]in the + // IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@-. You cannot use a + // value that begins with the text aws: . This prefix is reserved for Amazon Web + // Services internal use. + // + // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html SourceIdentity *string // A list of session tags that you want to pass. Each session tag consists of a - // key name and an associated value. For more information about session tags, see - // Tagging Amazon Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) - // in the IAM User Guide. This parameter is optional. You can pass up to 50 session - // tags. The plaintext session tag keys can’t exceed 128 characters, and the values - // can’t exceed 256 characters. For these and additional limits, see IAM and STS - // Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. An Amazon Web Services conversion compresses the passed - // inline session policy, managed policy ARNs, and session tags into a packed - // binary format that has a separate limit. Your request can fail for this limit - // even if your plaintext meets the other requirements. The PackedPolicySize - // response element indicates by percentage how close the policies and tags for - // your request are to the upper size limit. You can pass a session tag with the - // same key as a tag that is already attached to the role. When you do, session - // tags override a role tag with the same key. Tag key–value pairs are not case - // sensitive, but case is preserved. This means that you cannot have separate - // Department and department tag keys. Assume that the role has the Department = - // Marketing tag and you pass the department = engineering session tag. Department - // and department are not saved as separate tags, and the session tag passed in - // the request takes precedence over the role tag. Additionally, if you used - // temporary credentials to perform this operation, the new session inherits any - // transitive session tags from the calling session. If you pass a session tag with - // the same key as an inherited tag, the operation fails. To view the inherited - // tags for a session, see the CloudTrail logs. 
For more information, see Viewing - // Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs) + // key name and an associated value. For more information about session tags, see [Tagging Amazon Web Services STS Sessions] // in the IAM User Guide. + // + // This parameter is optional. You can pass up to 50 session tags. The plaintext + // session tag keys can’t exceed 128 characters, and the values can’t exceed 256 + // characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User Guide. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // You can pass a session tag with the same key as a tag that is already attached + // to the role. When you do, session tags override a role tag with the same key. + // + // Tag key–value pairs are not case sensitive, but case is preserved. This means + // that you cannot have separate Department and department tag keys. Assume that + // the role has the Department = Marketing tag and you pass the department = + // engineering session tag. Department and department are not saved as separate + // tags, and the session tag passed in the request takes precedence over the role + // tag. + // + // Additionally, if you used temporary credentials to perform this operation, the + // new session inherits any transitive session tags from the calling session. If + // you pass a session tag with the same key as an inherited tag, the operation + // fails. To view the inherited tags for a session, see the CloudTrail logs. For + // more information, see [Viewing Session Tags in CloudTrail]in the IAM User Guide. + // + // [Tagging Amazon Web Services STS Sessions]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html + // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length + // [Viewing Session Tags in CloudTrail]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs Tags []types.Tag // The value provided by the MFA device, if the trust policy of the role being // assumed requires MFA. (In other words, if the policy includes a condition that // tests for MFA). If the role being assumed requires MFA and if the TokenCode // value is missing or expired, the AssumeRole call returns an "access denied" - // error. The format for this parameter, as described by its regex pattern, is a - // sequence of six numeric digits. + // error. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. TokenCode *string // A list of keys for session tags that you want to set as transitive. If you set // a tag key as transitive, the corresponding key and value passes to subsequent - // sessions in a role chain. For more information, see Chaining Roles with Session - // Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) - // in the IAM User Guide. This parameter is optional. 
When you set session tags as - // transitive, the session policy and session tags packed binary limit is not - // affected. If you choose not to specify a transitive tag key, then no tags are - // passed from this session to any subsequent sessions. + // sessions in a role chain. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide. + // + // This parameter is optional. When you set session tags as transitive, the + // session policy and session tags packed binary limit is not affected. + // + // If you choose not to specify a transitive tag key, then no tags are passed from + // this session to any subsequent sessions. + // + // [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining TransitiveTagKeys []string noSmithyDocumentSerde } -// Contains the response to a successful AssumeRole request, including temporary -// Amazon Web Services credentials that can be used to make Amazon Web Services -// requests. +// Contains the response to a successful AssumeRole request, including temporary Amazon Web +// Services credentials that can be used to make Amazon Web Services requests. type AssumeRoleOutput struct { // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers @@ -296,9 +366,10 @@ type AssumeRoleOutput struct { AssumedRoleUser *types.AssumedRoleUser // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. The size of the security token - // that STS API operations return is not fixed. We strongly recommend that you make - // no assumptions about the maximum size. + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. Credentials *types.Credentials // A percentage value that indicates the packed size of the session policies and @@ -308,17 +379,21 @@ type AssumeRoleOutput struct { PackedPolicySize *int32 // The source identity specified by the principal that is calling the AssumeRole - // operation. You can require users to specify a source identity when they assume a - // role. You do this by using the sts:SourceIdentity condition key in a role trust - // policy. You can use source identity information in CloudTrail logs to determine - // who took actions with a role. You can use the aws:SourceIdentity condition key - // to further control access to Amazon Web Services resources based on the value of - // source identity. For more information about using source identity, see Monitor - // and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) - // in the IAM User Guide. The regex used to validate this parameter is a string of - // characters consisting of upper- and lower-case alphanumeric characters with no - // spaces. You can also include underscores or any of the following characters: - // =,.@- + // operation. + // + // You can require users to specify a source identity when they assume a role. You + // do this by using the sts:SourceIdentity condition key in a role trust policy. + // You can use source identity information in CloudTrail logs to determine who took + // actions with a role. 
You can use the aws:SourceIdentity condition key to + // further control access to Amazon Web Services resources based on the value of + // source identity. For more information about using source identity, see [Monitor and control actions taken with assumed roles]in the + // IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@- + // + // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html SourceIdentity *string // Metadata pertaining to the operation's result. @@ -382,6 +457,12 @@ func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, opti if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpAssumeRoleValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go index 2a57b72ac99..4c685abd5f7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go @@ -16,92 +16,132 @@ import ( // mechanism for tying an enterprise identity store or directory to role-based // Amazon Web Services access without user-specific credentials or configuration. // For a comparison of AssumeRoleWithSAML with the other API operations that -// produce temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. The temporary security credentials returned by this -// operation consist of an access key ID, a secret access key, and a security -// token. Applications can use these temporary security credentials to sign calls -// to Amazon Web Services services. Session Duration By default, the temporary -// security credentials created by AssumeRoleWithSAML last for one hour. However, -// you can use the optional DurationSeconds parameter to specify the duration of -// your session. Your role session lasts for the duration that you specify, or -// until the time specified in the SAML authentication response's -// SessionNotOnOrAfter value, whichever is shorter. You can provide a -// DurationSeconds value from 900 seconds (15 minutes) up to the maximum session -// duration setting for the role. This setting can have a value from 1 hour to 12 -// hours. To learn how to view the maximum value for your role, see View the -// Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) -// in the IAM User Guide. The maximum session duration limit applies when you use -// the AssumeRole* API operations or the assume-role* CLI commands. However the -// limit does not apply when you use those operations to create a console URL. 
For -// more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) -// in the IAM User Guide. Role chaining (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining) -// limits your CLI or Amazon Web Services API role session to a maximum of one +// produce temporary credentials, see [Requesting Temporary Security Credentials]and [Comparing the Amazon Web Services STS API operations] in the IAM User Guide. +// +// The temporary security credentials returned by this operation consist of an +// access key ID, a secret access key, and a security token. Applications can use +// these temporary security credentials to sign calls to Amazon Web Services +// services. +// +// # Session Duration +// +// By default, the temporary security credentials created by AssumeRoleWithSAML +// last for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. Your role session lasts for the +// duration that you specify, or until the time specified in the SAML +// authentication response's SessionNotOnOrAfter value, whichever is shorter. You +// can provide a DurationSeconds value from 900 seconds (15 minutes) up to the +// maximum session duration setting for the role. This setting can have a value +// from 1 hour to 12 hours. To learn how to view the maximum value for your role, +// see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI commands. However +// the limit does not apply when you use those operations to create a console URL. +// For more information, see [Using IAM Roles]in the IAM User Guide. +// +// [Role chaining]limits your CLI or Amazon Web Services API role session to a maximum of one // hour. When you use the AssumeRole API operation to assume a role, you can // specify the duration of your role session with the DurationSeconds parameter. // You can specify a parameter value of up to 43200 seconds (12 hours), depending // on the maximum session duration setting for your role. However, if you assume a // role using role chaining and provide a DurationSeconds parameter value greater -// than one hour, the operation fails. Permissions The temporary security -// credentials created by AssumeRoleWithSAML can be used to make API calls to any -// Amazon Web Services service with the following exception: you cannot call the -// STS GetFederationToken or GetSessionToken API operations. (Optional) You can -// pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policy Amazon -// Resource Names (ARNs) to use as managed session policies. The plaintext that you -// use for both inline and managed session policies can't exceed 2,048 characters. -// Passing policies to this operation returns new temporary credentials. The -// resulting session's permissions are the intersection of the role's -// identity-based policy and the session policies. You can use the role's temporary -// credentials in subsequent Amazon Web Services API calls to access resources in -// the account that owns the role. 
You cannot use session policies to grant more -// permissions than those allowed by the identity-based policy of the role that is -// being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. Calling AssumeRoleWithSAML does not require the use of -// Amazon Web Services security credentials. The identity of the caller is -// validated by using keys in the metadata document that is uploaded for the SAML -// provider entity for your identity provider. Calling AssumeRoleWithSAML can -// result in an entry in your CloudTrail logs. The entry includes the value in the -// NameID element of the SAML assertion. We recommend that you use a NameIDType -// that is not associated with any personally identifiable information (PII). For -// example, you could instead use the persistent identifier ( -// urn:oasis:names:tc:SAML:2.0:nameid-format:persistent ). Tags (Optional) You can -// configure your IdP to pass attributes into your SAML assertion as session tags. -// Each session tag consists of a key name and an associated value. For more -// information about session tags, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. You can pass up to 50 session tags. The plaintext session -// tag keys can’t exceed 128 characters and the values can’t exceed 256 characters. -// For these and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) -// in the IAM User Guide. An Amazon Web Services conversion compresses the passed -// inline session policy, managed policy ARNs, and session tags into a packed -// binary format that has a separate limit. Your request can fail for this limit -// even if your plaintext meets the other requirements. The PackedPolicySize -// response element indicates by percentage how close the policies and tags for -// your request are to the upper size limit. You can pass a session tag with the -// same key as a tag that is attached to the role. When you do, session tags -// override the role's tags with the same key. An administrator must grant you the -// permissions necessary to pass session tags. The administrator can also create -// granular permissions to allow you to pass only specific session tags. For more -// information, see Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. You can set the session tags as transitive. Transitive -// tags persist during role chaining. For more information, see Chaining Roles -// with Session Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) -// in the IAM User Guide. SAML Configuration Before your application can call -// AssumeRoleWithSAML , you must configure your SAML identity provider (IdP) to -// issue the claims required by Amazon Web Services. Additionally, you must use -// Identity and Access Management (IAM) to create a SAML provider entity in your -// Amazon Web Services account that represents your identity provider. You must -// also create an IAM role that specifies this SAML provider in its trust policy. +// than one hour, the operation fails. 
+// +// # Permissions +// +// The temporary security credentials created by AssumeRoleWithSAML can be used to +// make API calls to any Amazon Web Services service with the following exception: +// you cannot call the STS GetFederationToken or GetSessionToken API operations. +// +// (Optional) You can pass inline or managed [session policies] to this operation. You can pass a +// single JSON policy document to use as an inline session policy. You can also +// specify up to 10 managed policy Amazon Resource Names (ARNs) to use as managed +// session policies. The plaintext that you use for both inline and managed session +// policies can't exceed 2,048 characters. Passing policies to this operation +// returns new temporary credentials. The resulting session's permissions are the +// intersection of the role's identity-based policy and the session policies. You +// can use the role's temporary credentials in subsequent Amazon Web Services API +// calls to access resources in the account that owns the role. You cannot use +// session policies to grant more permissions than those allowed by the +// identity-based policy of the role that is being assumed. For more information, +// see [Session Policies]in the IAM User Guide. +// +// Calling AssumeRoleWithSAML does not require the use of Amazon Web Services +// security credentials. The identity of the caller is validated by using keys in +// the metadata document that is uploaded for the SAML provider entity for your +// identity provider. +// +// Calling AssumeRoleWithSAML can result in an entry in your CloudTrail logs. The +// entry includes the value in the NameID element of the SAML assertion. We +// recommend that you use a NameIDType that is not associated with any personally +// identifiable information (PII). For example, you could instead use the +// persistent identifier ( urn:oasis:names:tc:SAML:2.0:nameid-format:persistent ). +// +// # Tags +// +// (Optional) You can configure your IdP to pass attributes into your SAML +// assertion as session tags. Each session tag consists of a key name and an +// associated value. For more information about session tags, see [Passing Session Tags in STS]in the IAM User +// Guide. +// +// You can pass up to 50 session tags. The plaintext session tag keys can’t exceed +// 128 characters and the values can’t exceed 256 characters. For these and +// additional limits, see [IAM and STS Character Limits]in the IAM User Guide. +// +// An Amazon Web Services conversion compresses the passed inline session policy, +// managed policy ARNs, and session tags into a packed binary format that has a +// separate limit. Your request can fail for this limit even if your plaintext +// meets the other requirements. The PackedPolicySize response element indicates +// by percentage how close the policies and tags for your request are to the upper +// size limit. +// +// You can pass a session tag with the same key as a tag that is attached to the +// role. When you do, session tags override the role's tags with the same key. +// +// An administrator must grant you the permissions necessary to pass session tags. +// The administrator can also create granular permissions to allow you to pass only +// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during role +// chaining. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide. 
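// Minimal illustrative sketch of calling AssumeRoleWithSAML as documented above, once a
// SAML provider entity and role are configured in IAM. The ARNs are hypothetical, and
// samlResponse is assumed to hold the base64 encoded SAML assertion returned by the IdP.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func assumeWithSAML(ctx context.Context, samlResponse string) error {
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		return err
	}
	client := sts.NewFromConfig(cfg)

	// The assertion is validated against the SAML provider entity in IAM, so this
	// call does not have to be signed with long-term credentials.
	out, err := client.AssumeRoleWithSAML(ctx, &sts.AssumeRoleWithSAMLInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-saml-role"),  // hypothetical
		PrincipalArn:    aws.String("arn:aws:iam::123456789012:saml-provider/MySAMLIdP"), // hypothetical
		SAMLAssertion:   aws.String(samlResponse),
		DurationSeconds: aws.Int32(3600),
	})
	if err != nil {
		return err
	}
	fmt.Println("session expires at", aws.ToTime(out.Credentials.Expiration))
	return nil
}

func main() {
	if err := assumeWithSAML(context.Background(), "<base64 SAML response>"); err != nil {
		log.Fatal(err)
	}
}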
+// +// # SAML Configuration +// +// Before your application can call AssumeRoleWithSAML , you must configure your +// SAML identity provider (IdP) to issue the claims required by Amazon Web +// Services. Additionally, you must use Identity and Access Management (IAM) to +// create a SAML provider entity in your Amazon Web Services account that +// represents your identity provider. You must also create an IAM role that +// specifies this SAML provider in its trust policy. +// // For more information, see the following resources: -// - About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) -// in the IAM User Guide. -// - Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) -// in the IAM User Guide. -// - Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) -// in the IAM User Guide. -// - Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) -// in the IAM User Guide. +// +// [About SAML 2.0-based Federation] +// - in the IAM User Guide. +// +// [Creating SAML Identity Providers] +// - in the IAM User Guide. +// +// [Configuring a Relying Party and Claims] +// - in the IAM User Guide. +// +// [Creating a Role for SAML 2.0 Federation] +// - in the IAM User Guide. +// +// [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session +// [Creating a Role for SAML 2.0 Federation]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html +// [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length +// [Comparing the Amazon Web Services STS API operations]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison +// [Creating SAML Identity Providers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html +// [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html +// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html +// [Configuring a Relying Party and Claims]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html +// [Role chaining]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining +// [Using IAM Roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html +// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html +// [About SAML 2.0-based Federation]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html +// [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining func (c *Client) AssumeRoleWithSAML(ctx context.Context, params *AssumeRoleWithSAMLInput, optFns ...func(*Options)) 
(*AssumeRoleWithSAMLOutput, error) { if params == nil { params = &AssumeRoleWithSAMLInput{} @@ -130,9 +170,11 @@ type AssumeRoleWithSAMLInput struct { // This member is required. RoleArn *string - // The base64 encoded SAML authentication response provided by the IdP. For more - // information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) - // in the IAM User Guide. + // The base64 encoded SAML authentication response provided by the IdP. + // + // For more information, see [Configuring a Relying Party and Adding Claims] in the IAM User Guide. + // + // [Configuring a Relying Party and Adding Claims]: https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html // // This member is required. SAMLAssertion *string @@ -146,92 +188,114 @@ type AssumeRoleWithSAMLInput struct { // than this setting, the operation fails. For example, if you specify a session // duration of 12 hours, but your administrator set the maximum session duration to // 6 hours, your operation fails. To learn how to view the maximum value for your - // role, see View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) - // in the IAM User Guide. By default, the value is set to 3600 seconds. The - // DurationSeconds parameter is separate from the duration of a console session - // that you might request using the returned credentials. The request to the - // federation endpoint for a console sign-in token takes a SessionDuration + // role, see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request to + // the federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more - // information, see Creating a URL that Enables Federated Users to Access the - // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) - // in the IAM User Guide. + // information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]in the IAM User Guide. + // + // [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session + // [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html DurationSeconds *int32 // An IAM policy in JSON format that you want to use as an inline session policy. + // // This parameter is optional. Passing policies to this operation returns new // temporary credentials. The resulting session's permissions are the intersection // of the role's identity-based policy and the session policies. You can use the // role's temporary credentials in subsequent Amazon Web Services API calls to // access resources in the account that owns the role. You cannot use session // policies to grant more permissions than those allowed by the identity-based - // policy of the role that is being assumed. 
For more information, see Session - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. The plaintext that you use for both inline and managed - // session policies can't exceed 2,048 characters. The JSON policy characters can - // be any ASCII character from the space character to the end of the valid - // character list (\u0020 through \u00FF). It can also include the tab (\u0009), - // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web - // Services conversion compresses the passed inline session policy, managed policy - // ARNs, and session tags into a packed binary format that has a separate limit. - // Your request can fail for this limit even if your plaintext meets the other - // requirements. The PackedPolicySize response element indicates by percentage how - // close the policies and tags for your request are to the upper size limit. + // policy of the role that is being assumed. For more information, see [Session Policies]in the IAM + // User Guide. + // + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session Policy *string // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to // use as managed session policies. The policies must exist in the same account as - // the role. This parameter is optional. You can provide up to 10 managed policy - // ARNs. However, the plaintext that you use for both inline and managed session - // policies can't exceed 2,048 characters. For more information about ARNs, see - // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. An Amazon Web Services conversion - // compresses the passed inline session policy, managed policy ARNs, and session - // tags into a packed binary format that has a separate limit. Your request can - // fail for this limit even if your plaintext meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. Passing policies to this - // operation returns new temporary credentials. The resulting session's permissions - // are the intersection of the role's identity-based policy and the session - // policies. You can use the role's temporary credentials in subsequent Amazon Web - // Services API calls to access resources in the account that owns the role. 
You - // cannot use session policies to grant more permissions than those allowed by the - // identity-based policy of the role that is being assumed. For more information, - // see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. + // the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plaintext that you use for both inline and managed session policies + // can't exceed 2,048 characters. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the + // Amazon Web Services General Reference. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's + // identity-based policy and the session policies. You can use the role's temporary + // credentials in subsequent Amazon Web Services API calls to access resources in + // the account that owns the role. You cannot use session policies to grant more + // permissions than those allowed by the identity-based policy of the role that is + // being assumed. For more information, see [Session Policies]in the IAM User Guide. + // + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html PolicyArns []types.PolicyDescriptorType noSmithyDocumentSerde } -// Contains the response to a successful AssumeRoleWithSAML request, including -// temporary Amazon Web Services credentials that can be used to make Amazon Web -// Services requests. +// Contains the response to a successful AssumeRoleWithSAML request, including temporary Amazon Web +// Services credentials that can be used to make Amazon Web Services requests. type AssumeRoleWithSAMLOutput struct { // The identifiers for the temporary security credentials that the operation // returns. AssumedRoleUser *types.AssumedRoleUser - // The value of the Recipient attribute of the SubjectConfirmationData element of + // The value of the Recipient attribute of the SubjectConfirmationData element of // the SAML assertion. Audience *string // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. The size of the security token - // that STS API operations return is not fixed. We strongly recommend that you make - // no assumptions about the maximum size. + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. Credentials *types.Credentials // The value of the Issuer element of the SAML assertion. Issuer *string // A hash value based on the concatenation of the following: + // // - The Issuer response value. + // // - The Amazon Web Services account ID. 
+ // // - The friendly name (the last part of the ARN) of the SAML provider in IAM. + // // The combination of NameQualifier and Subject can be used to uniquely identify a - // user. The following pseudocode shows how the hash value is calculated: BASE64 ( - // SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" ) ) + // user. + // + // The following pseudocode shows how the hash value is calculated: + // + // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" ) ) NameQualifier *string // A percentage value that indicates the packed size of the session policies and @@ -240,31 +304,36 @@ type AssumeRoleWithSAMLOutput struct { // allowed space. PackedPolicySize *int32 - // The value in the SourceIdentity attribute in the SAML assertion. You can - // require users to set a source identity value when they assume a role. You do - // this by using the sts:SourceIdentity condition key in a role trust policy. That - // way, actions that are taken with the role are associated with that user. After - // the source identity is set, the value cannot be changed. It is present in the - // request for all actions that are taken by the role and persists across chained - // role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) - // sessions. You can configure your SAML identity provider to use an attribute - // associated with your users, like user name or email, as the source identity when - // calling AssumeRoleWithSAML . You do this by adding an attribute to the SAML - // assertion. For more information about using source identity, see Monitor and - // control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) - // in the IAM User Guide. The regex used to validate this parameter is a string of - // characters consisting of upper- and lower-case alphanumeric characters with no - // spaces. You can also include underscores or any of the following characters: - // =,.@- + // The value in the SourceIdentity attribute in the SAML assertion. + // + // You can require users to set a source identity value when they assume a role. + // You do this by using the sts:SourceIdentity condition key in a role trust + // policy. That way, actions that are taken with the role are associated with that + // user. After the source identity is set, the value cannot be changed. It is + // present in the request for all actions that are taken by the role and persists + // across [chained role]sessions. You can configure your SAML identity provider to use an + // attribute associated with your users, like user name or email, as the source + // identity when calling AssumeRoleWithSAML . You do this by adding an attribute to + // the SAML assertion. For more information about using source identity, see [Monitor and control actions taken with assumed roles]in + // the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. 
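// Worked example of the NameQualifier pseudocode quoted above,
// BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" ) ),
// using only the standard library and the sample values from that comment.
package main

import (
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

func main() {
	// Concatenate the Issuer value, the account ID, and the SAML provider name,
	// hash with SHA-1, then base64 encode the digest.
	sum := sha1.Sum([]byte("https://example.com/saml" + "123456789012" + "/MySAMLIdP"))
	fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
}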
You can also + // include underscores or any of the following characters: =,.@- + // + // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining + // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html SourceIdentity *string // The value of the NameID element in the Subject element of the SAML assertion. Subject *string - // The format of the name ID, as defined by the Format attribute in the NameID + // The format of the name ID, as defined by the Format attribute in the NameID // element of the SAML assertion. Typical examples of the format are transient or - // persistent . If the format includes the prefix - // urn:oasis:names:tc:SAML:2.0:nameid-format , that prefix is removed. For example, + // persistent . + // + // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format , + // that prefix is removed. For example, // urn:oasis:names:tc:SAML:2.0:nameid-format:transient is returned as transient . // If the format includes any other prefix, the format is returned with no // modifications. @@ -328,6 +397,12 @@ func (c *Client) addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpAssumeRoleWithSAMLValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go index 98108ce6af0..0b5e5a377c2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go @@ -14,105 +14,143 @@ import ( // Returns a set of temporary security credentials for users who have been // authenticated in a mobile or web application with a web identity provider. // Example providers include the OAuth 2.0 providers Login with Amazon and -// Facebook, or any OpenID Connect-compatible identity provider such as Google or -// Amazon Cognito federated identities (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html) -// . For mobile applications, we recommend that you use Amazon Cognito. You can use -// Amazon Cognito with the Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) -// and the Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/) -// to uniquely identify a user. You can also supply the user with a consistent -// identity throughout the lifetime of an application. To learn more about Amazon -// Cognito, see Amazon Cognito identity pools (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html) -// in Amazon Cognito Developer Guide. Calling AssumeRoleWithWebIdentity does not -// require the use of Amazon Web Services security credentials. Therefore, you can -// distribute an application (for example, on mobile devices) that requests -// temporary security credentials without including long-term Amazon Web Services -// credentials in the application. You also don't need to deploy server-based proxy -// services that use long-term Amazon Web Services credentials. 
Instead, the -// identity of the caller is validated by using a token from the web identity -// provider. For a comparison of AssumeRoleWithWebIdentity with the other API -// operations that produce temporary credentials, see Requesting Temporary -// Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. The temporary security credentials returned by this API -// consist of an access key ID, a secret access key, and a security token. -// Applications can use these temporary security credentials to sign calls to -// Amazon Web Services service API operations. Session Duration By default, the -// temporary security credentials created by AssumeRoleWithWebIdentity last for -// one hour. However, you can use the optional DurationSeconds parameter to -// specify the duration of your session. You can provide a value from 900 seconds -// (15 minutes) up to the maximum session duration setting for the role. This -// setting can have a value from 1 hour to 12 hours. To learn how to view the -// maximum value for your role, see View the Maximum Session Duration Setting for -// a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) -// in the IAM User Guide. The maximum session duration limit applies when you use -// the AssumeRole* API operations or the assume-role* CLI commands. However the -// limit does not apply when you use those operations to create a console URL. For -// more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) -// in the IAM User Guide. Permissions The temporary security credentials created by -// AssumeRoleWithWebIdentity can be used to make API calls to any Amazon Web -// Services service with the following exception: you cannot call the STS -// GetFederationToken or GetSessionToken API operations. (Optional) You can pass -// inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policy Amazon -// Resource Names (ARNs) to use as managed session policies. The plaintext that you -// use for both inline and managed session policies can't exceed 2,048 characters. -// Passing policies to this operation returns new temporary credentials. The -// resulting session's permissions are the intersection of the role's -// identity-based policy and the session policies. You can use the role's temporary -// credentials in subsequent Amazon Web Services API calls to access resources in -// the account that owns the role. You cannot use session policies to grant more -// permissions than those allowed by the identity-based policy of the role that is -// being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. Tags (Optional) You can configure your IdP to pass -// attributes into your web identity token as session tags. Each session tag -// consists of a key name and an associated value. 
For more information about -// session tags, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. You can pass up to 50 session tags. The plaintext session -// tag keys can’t exceed 128 characters and the values can’t exceed 256 characters. -// For these and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) -// in the IAM User Guide. An Amazon Web Services conversion compresses the passed -// inline session policy, managed policy ARNs, and session tags into a packed -// binary format that has a separate limit. Your request can fail for this limit -// even if your plaintext meets the other requirements. The PackedPolicySize -// response element indicates by percentage how close the policies and tags for -// your request are to the upper size limit. You can pass a session tag with the -// same key as a tag that is attached to the role. When you do, the session tag -// overrides the role tag with the same key. An administrator must grant you the -// permissions necessary to pass session tags. The administrator can also create -// granular permissions to allow you to pass only specific session tags. For more -// information, see Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. You can set the session tags as transitive. Transitive -// tags persist during role chaining. For more information, see Chaining Roles -// with Session Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) -// in the IAM User Guide. Identities Before your application can call -// AssumeRoleWithWebIdentity , you must have an identity token from a supported -// identity provider and create a role that the application can assume. The role -// that your application assumes must trust the identity provider that is -// associated with the identity token. In other words, the identity provider must -// be specified in the role's trust policy. Calling AssumeRoleWithWebIdentity can -// result in an entry in your CloudTrail logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) -// of the provided web identity token. We recommend that you avoid using any -// personally identifiable information (PII) in this field. For example, you could -// instead use a GUID or a pairwise identifier, as suggested in the OIDC -// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes) -// . For more information about how to use web identity federation and the +// Facebook, or any OpenID Connect-compatible identity provider such as Google or [Amazon Cognito federated identities]. +// +// For mobile applications, we recommend that you use Amazon Cognito. You can use +// Amazon Cognito with the [Amazon Web Services SDK for iOS Developer Guide]and the [Amazon Web Services SDK for Android Developer Guide] to uniquely identify a user. You can also +// supply the user with a consistent identity throughout the lifetime of an +// application. +// +// To learn more about Amazon Cognito, see [Amazon Cognito identity pools] in Amazon Cognito Developer Guide. +// +// Calling AssumeRoleWithWebIdentity does not require the use of Amazon Web +// Services security credentials. 
Therefore, you can distribute an application (for +// example, on mobile devices) that requests temporary security credentials without +// including long-term Amazon Web Services credentials in the application. You also +// don't need to deploy server-based proxy services that use long-term Amazon Web +// Services credentials. Instead, the identity of the caller is validated by using +// a token from the web identity provider. For a comparison of +// AssumeRoleWithWebIdentity with the other API operations that produce temporary +// credentials, see [Requesting Temporary Security Credentials]and [Comparing the Amazon Web Services STS API operations] in the IAM User Guide. +// +// The temporary security credentials returned by this API consist of an access +// key ID, a secret access key, and a security token. Applications can use these +// temporary security credentials to sign calls to Amazon Web Services service API +// operations. +// +// # Session Duration +// +// By default, the temporary security credentials created by +// AssumeRoleWithWebIdentity last for one hour. However, you can use the optional +// DurationSeconds parameter to specify the duration of your session. You can +// provide a value from 900 seconds (15 minutes) up to the maximum session duration +// setting for the role. This setting can have a value from 1 hour to 12 hours. To +// learn how to view the maximum value for your role, see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide. +// The maximum session duration limit applies when you use the AssumeRole* API +// operations or the assume-role* CLI commands. However the limit does not apply +// when you use those operations to create a console URL. For more information, see +// [Using IAM Roles]in the IAM User Guide. +// +// # Permissions +// +// The temporary security credentials created by AssumeRoleWithWebIdentity can be +// used to make API calls to any Amazon Web Services service with the following +// exception: you cannot call the STS GetFederationToken or GetSessionToken API +// operations. +// +// (Optional) You can pass inline or managed [session policies] to this operation. You can pass a +// single JSON policy document to use as an inline session policy. You can also +// specify up to 10 managed policy Amazon Resource Names (ARNs) to use as managed +// session policies. The plaintext that you use for both inline and managed session +// policies can't exceed 2,048 characters. Passing policies to this operation +// returns new temporary credentials. The resulting session's permissions are the +// intersection of the role's identity-based policy and the session policies. You +// can use the role's temporary credentials in subsequent Amazon Web Services API +// calls to access resources in the account that owns the role. You cannot use +// session policies to grant more permissions than those allowed by the +// identity-based policy of the role that is being assumed. For more information, +// see [Session Policies]in the IAM User Guide. +// +// # Tags +// +// (Optional) You can configure your IdP to pass attributes into your web identity +// token as session tags. Each session tag consists of a key name and an associated +// value. For more information about session tags, see [Passing Session Tags in STS]in the IAM User Guide. +// +// You can pass up to 50 session tags. The plaintext session tag keys can’t exceed +// 128 characters and the values can’t exceed 256 characters. 
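// Minimal illustrative sketch of the AssumeRoleWithWebIdentity flow documented above:
// exchange an OIDC token issued by a web identity provider for temporary credentials.
// The role ARN is hypothetical and idToken is assumed to hold the provider-issued token.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func assumeWithWebIdentity(ctx context.Context, idToken string) error {
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		return err
	}
	client := sts.NewFromConfig(cfg)

	// The request is validated with the web identity token itself, so no long-term
	// Amazon Web Services credentials need to ship with the application.
	out, err := client.AssumeRoleWithWebIdentity(ctx, &sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::123456789012:role/example-web-identity-role"), // hypothetical
		RoleSessionName:  aws.String("example-session"),
		WebIdentityToken: aws.String(idToken),
		DurationSeconds:  aws.Int32(900),
	})
	if err != nil {
		return err
	}
	fmt.Println("issuing provider:", aws.ToString(out.Provider))
	return nil
}

func main() {
	if err := assumeWithWebIdentity(context.Background(), "<OIDC ID token>"); err != nil {
		log.Fatal(err)
	}
}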
For these and +// additional limits, see [IAM and STS Character Limits]in the IAM User Guide. +// +// An Amazon Web Services conversion compresses the passed inline session policy, +// managed policy ARNs, and session tags into a packed binary format that has a +// separate limit. Your request can fail for this limit even if your plaintext +// meets the other requirements. The PackedPolicySize response element indicates +// by percentage how close the policies and tags for your request are to the upper +// size limit. +// +// You can pass a session tag with the same key as a tag that is attached to the +// role. When you do, the session tag overrides the role tag with the same key. +// +// An administrator must grant you the permissions necessary to pass session tags. +// The administrator can also create granular permissions to allow you to pass only +// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during role +// chaining. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide. +// +// # Identities +// +// Before your application can call AssumeRoleWithWebIdentity , you must have an +// identity token from a supported identity provider and create a role that the +// application can assume. The role that your application assumes must trust the +// identity provider that is associated with the identity token. In other words, +// the identity provider must be specified in the role's trust policy. +// +// Calling AssumeRoleWithWebIdentity can result in an entry in your CloudTrail +// logs. The entry includes the [Subject]of the provided web identity token. We recommend +// that you avoid using any personally identifiable information (PII) in this +// field. For example, you could instead use a GUID or a pairwise identifier, as [suggested in the OIDC specification]. +// +// For more information about how to use web identity federation and the // AssumeRoleWithWebIdentity API, see the following resources: -// - Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) -// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) -// . -// - Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/) -// . Walk through the process of authenticating through Login with Amazon, +// +// [Using Web Identity Federation API Operations for Mobile Apps] +// - and [Federation Through a Web-based Identity Provider]. +// +// [Web Identity Federation Playground] +// - . Walk through the process of authenticating through Login with Amazon, // Facebook, or Google, getting temporary security credentials, and then using // those credentials to make a request to Amazon Web Services. -// - Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) -// and Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/) -// . These toolkits contain sample apps that show how to invoke the identity -// providers. The toolkits then show how to use the information from these +// +// [Amazon Web Services SDK for iOS Developer Guide] +// - and [Amazon Web Services SDK for Android Developer Guide]. 
These toolkits contain sample apps that show how to invoke the +// identity providers. The toolkits then show how to use the information from these // providers to get and use temporary security credentials. -// - Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications) -// . This article discusses web identity federation and shows an example of how to -// use web identity federation to get access to content in Amazon S3. +// +// [Web Identity Federation with Mobile Applications] +// - . This article discusses web identity federation and shows an example of +// how to use web identity federation to get access to content in Amazon S3. +// +// [Amazon Web Services SDK for iOS Developer Guide]: http://aws.amazon.com/sdkforios/ +// [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session +// [Web Identity Federation Playground]: https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/ +// [Amazon Web Services SDK for Android Developer Guide]: http://aws.amazon.com/sdkforandroid/ +// [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length +// [Comparing the Amazon Web Services STS API operations]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison +// [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html +// [Subject]: http://openid.net/specs/openid-connect-core-1_0.html#Claims +// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html +// [Amazon Cognito identity pools]: https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html +// [Federation Through a Web-based Identity Provider]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity +// [Using IAM Roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html +// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [Amazon Cognito federated identities]: https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html +// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html +// [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining +// [Web Identity Federation with Mobile Applications]: http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications +// [Using Web Identity Federation API Operations for Mobile Apps]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html +// [suggested in the OIDC specification]: http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes func (c *Client) AssumeRoleWithWebIdentity(ctx context.Context, params *AssumeRoleWithWebIdentityInput, optFns ...func(*Options)) (*AssumeRoleWithWebIdentityOutput, error) { if params == nil { params = &AssumeRoleWithWebIdentityInput{} @@ -139,10 +177,11 @@ type AssumeRoleWithWebIdentityInput struct { // identifier that is associated 
with the user who is using your application. That // way, the temporary security credentials that your application will use are // associated with that user. This session name is included as part of the ARN and - // assumed role ID in the AssumedRoleUser response element. The regex used to - // validate this parameter is a string of characters consisting of upper- and - // lower-case alphanumeric characters with no spaces. You can also include - // underscores or any of the following characters: =,.@- + // assumed role ID in the AssumedRoleUser response element. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@- // // This member is required. RoleSessionName *string @@ -162,73 +201,90 @@ type AssumeRoleWithWebIdentityInput struct { // higher than this setting, the operation fails. For example, if you specify a // session duration of 12 hours, but your administrator set the maximum session // duration to 6 hours, your operation fails. To learn how to view the maximum - // value for your role, see View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) - // in the IAM User Guide. By default, the value is set to 3600 seconds. The - // DurationSeconds parameter is separate from the duration of a console session - // that you might request using the returned credentials. The request to the - // federation endpoint for a console sign-in token takes a SessionDuration + // value for your role, see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request to + // the federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more - // information, see Creating a URL that Enables Federated Users to Access the - // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) - // in the IAM User Guide. + // information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]in the IAM User Guide. + // + // [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session + // [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html DurationSeconds *int32 // An IAM policy in JSON format that you want to use as an inline session policy. + // // This parameter is optional. Passing policies to this operation returns new // temporary credentials. The resulting session's permissions are the intersection // of the role's identity-based policy and the session policies. You can use the // role's temporary credentials in subsequent Amazon Web Services API calls to // access resources in the account that owns the role. 
You cannot use session // policies to grant more permissions than those allowed by the identity-based - // policy of the role that is being assumed. For more information, see Session - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. The plaintext that you use for both inline and managed - // session policies can't exceed 2,048 characters. The JSON policy characters can - // be any ASCII character from the space character to the end of the valid - // character list (\u0020 through \u00FF). It can also include the tab (\u0009), - // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web - // Services conversion compresses the passed inline session policy, managed policy - // ARNs, and session tags into a packed binary format that has a separate limit. - // Your request can fail for this limit even if your plaintext meets the other - // requirements. The PackedPolicySize response element indicates by percentage how - // close the policies and tags for your request are to the upper size limit. + // policy of the role that is being assumed. For more information, see [Session Policies]in the IAM + // User Guide. + // + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session Policy *string // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to // use as managed session policies. The policies must exist in the same account as - // the role. This parameter is optional. You can provide up to 10 managed policy - // ARNs. However, the plaintext that you use for both inline and managed session - // policies can't exceed 2,048 characters. For more information about ARNs, see - // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. An Amazon Web Services conversion - // compresses the passed inline session policy, managed policy ARNs, and session - // tags into a packed binary format that has a separate limit. Your request can - // fail for this limit even if your plaintext meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. Passing policies to this - // operation returns new temporary credentials. The resulting session's permissions - // are the intersection of the role's identity-based policy and the session - // policies. 
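// Illustrative sketch of passing an inline session policy through the Policy parameter
// described above, scoping a session down to a single (hypothetical) S3 bucket.
// AssumeRole is used here for brevity; the Policy and PolicyArns parameters behave the
// same way on AssumeRoleWithSAML and AssumeRoleWithWebIdentity. The policy must stay
// within the 2,048 character plaintext limit and the packed policies/tags limit.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

const sessionPolicy = `{
  "Version": "2012-10-17",
  "Statement": [{
    "Effect": "Allow",
    "Action": ["s3:GetObject"],
    "Resource": ["arn:aws:s3:::example-bucket/*"]
  }]
}`

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	// The effective permissions are the intersection of the role's identity-based
	// policy and this session policy; the session policy cannot add permissions.
	out, err := client.AssumeRole(ctx, &sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"), // hypothetical
		RoleSessionName: aws.String("scoped-session"),
		Policy:          aws.String(sessionPolicy),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("packed policy size: %d%% of the limit\n", aws.ToInt32(out.PackedPolicySize))
}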
You can use the role's temporary credentials in subsequent Amazon Web - // Services API calls to access resources in the account that owns the role. You - // cannot use session policies to grant more permissions than those allowed by the - // identity-based policy of the role that is being assumed. For more information, - // see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. + // the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plaintext that you use for both inline and managed session policies + // can't exceed 2,048 characters. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the + // Amazon Web Services General Reference. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's + // identity-based policy and the session policies. You can use the role's temporary + // credentials in subsequent Amazon Web Services API calls to access resources in + // the account that owns the role. You cannot use session policies to grant more + // permissions than those allowed by the identity-based policy of the role that is + // being assumed. For more information, see [Session Policies]in the IAM User Guide. + // + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html PolicyArns []types.PolicyDescriptorType // The fully qualified host component of the domain name of the OAuth 2.0 identity // provider. Do not specify this value for an OpenID Connect identity provider. + // // Currently www.amazon.com and graph.facebook.com are the only supported identity // providers for OAuth 2.0 access tokens. Do not include URL schemes and port - // numbers. Do not specify this value for OpenID Connect ID tokens. + // numbers. + // + // Do not specify this value for OpenID Connect ID tokens. ProviderId *string noSmithyDocumentSerde } -// Contains the response to a successful AssumeRoleWithWebIdentity request, -// including temporary Amazon Web Services credentials that can be used to make -// Amazon Web Services requests. +// Contains the response to a successful AssumeRoleWithWebIdentity request, including temporary Amazon Web +// Services credentials that can be used to make Amazon Web Services requests. type AssumeRoleWithWebIdentityOutput struct { // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers @@ -244,9 +300,10 @@ type AssumeRoleWithWebIdentityOutput struct { Audience *string // The temporary security credentials, which include an access key ID, a secret - // access key, and a security token. The size of the security token that STS API - // operations return is not fixed. 
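// Sketch: supplying managed session policies through the PolicyArns field
// described above. Only the request input is built here; the role ARN, token,
// and managed policy ARN are placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
	"github.com/aws/aws-sdk-go-v2/service/sts/types"
)

func main() {
	in := &sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::123456789012:role/web-identity-role"), // placeholder
		RoleSessionName:  aws.String("app-user-42"),                                      // placeholder
		WebIdentityToken: aws.String("<oidc-id-token>"),                                  // placeholder
		// Up to 10 managed policy ARNs can be attached as session policies; the
		// effective permissions are the intersection with the role's own policy.
		PolicyArns: []types.PolicyDescriptorType{
			{Arn: aws.String("arn:aws:iam::aws:policy/ReadOnlyAccess")}, // placeholder
		},
	}
	fmt.Printf("request with %d managed session policies\n", len(in.PolicyArns))
}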
We strongly recommend that you make no - // assumptions about the maximum size. + // access key, and a security token. + // + // The size of the security token that STS API operations return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. Credentials *types.Credentials // A percentage value that indicates the packed size of the session policies and @@ -255,30 +312,34 @@ type AssumeRoleWithWebIdentityOutput struct { // allowed space. PackedPolicySize *int32 - // The issuing authority of the web identity token presented. For OpenID Connect + // The issuing authority of the web identity token presented. For OpenID Connect // ID tokens, this contains the value of the iss field. For OAuth 2.0 access // tokens, this contains the value of the ProviderId parameter that was passed in // the AssumeRoleWithWebIdentity request. Provider *string // The value of the source identity that is returned in the JSON web token (JWT) - // from the identity provider. You can require users to set a source identity value - // when they assume a role. You do this by using the sts:SourceIdentity condition - // key in a role trust policy. That way, actions that are taken with the role are - // associated with that user. After the source identity is set, the value cannot be - // changed. It is present in the request for all actions that are taken by the role - // and persists across chained role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) - // sessions. You can configure your identity provider to use an attribute + // from the identity provider. + // + // You can require users to set a source identity value when they assume a role. + // You do this by using the sts:SourceIdentity condition key in a role trust + // policy. That way, actions that are taken with the role are associated with that + // user. After the source identity is set, the value cannot be changed. It is + // present in the request for all actions that are taken by the role and persists + // across [chained role]sessions. You can configure your identity provider to use an attribute // associated with your users, like user name or email, as the source identity when // calling AssumeRoleWithWebIdentity . You do this by adding a claim to the JSON - // web token. To learn more about OIDC tokens and claims, see Using Tokens with - // User Pools (https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html) - // in the Amazon Cognito Developer Guide. For more information about using source - // identity, see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) - // in the IAM User Guide. The regex used to validate this parameter is a string of - // characters consisting of upper- and lower-case alphanumeric characters with no - // spaces. You can also include underscores or any of the following characters: - // =,.@- + // web token. To learn more about OIDC tokens and claims, see [Using Tokens with User Pools]in the Amazon + // Cognito Developer Guide. For more information about using source identity, see [Monitor and control actions taken with assumed roles] + // in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. 
You can also + // include underscores or any of the following characters: =,.@- + // + // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining + // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html + // [Using Tokens with User Pools]: https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html SourceIdentity *string // The unique user identifier that is returned by the identity provider. This @@ -347,6 +408,12 @@ func (c *Client) addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middlew if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpAssumeRoleWithWebIdentityValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go index b4ad54ab2fa..b1f14d28ce2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go @@ -11,28 +11,39 @@ import ( ) // Decodes additional information about the authorization status of a request from -// an encoded message returned in response to an Amazon Web Services request. For -// example, if a user is not authorized to perform an operation that he or she has -// requested, the request returns a Client.UnauthorizedOperation response (an HTTP -// 403 response). Some Amazon Web Services operations additionally return an -// encoded message that can provide details about this authorization failure. Only -// certain Amazon Web Services operations return an encoded authorization message. -// The documentation for an individual operation indicates whether that operation -// returns an encoded message in addition to returning an HTTP code. The message is -// encoded because the details of the authorization status can contain privileged -// information that the user who requested the operation should not see. To decode -// an authorization status message, a user must be granted permissions through an -// IAM policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) -// to request the DecodeAuthorizationMessage ( sts:DecodeAuthorizationMessage ) -// action. The decoded message includes the following type of information: +// an encoded message returned in response to an Amazon Web Services request. +// +// For example, if a user is not authorized to perform an operation that he or she +// has requested, the request returns a Client.UnauthorizedOperation response (an +// HTTP 403 response). Some Amazon Web Services operations additionally return an +// encoded message that can provide details about this authorization failure. +// +// Only certain Amazon Web Services operations return an encoded authorization +// message. The documentation for an individual operation indicates whether that +// operation returns an encoded message in addition to returning an HTTP code. 
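// Sketch: turning the Credentials returned in AssumeRoleWithWebIdentityOutput
// (access key ID, secret access key, session token) into a credentials provider
// for later SDK calls. The credentials helper package used here is the usual
// aws-sdk-go-v2 companion module and is an assumption of this sketch; the
// credential values are placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/credentials"
	"github.com/aws/aws-sdk-go-v2/service/sts/types"
)

func providerFrom(c *types.Credentials) aws.CredentialsProvider {
	// Treat the session token as opaque: its size is not fixed, so it is stored
	// and forwarded as-is rather than validated or truncated.
	return credentials.NewStaticCredentialsProvider(
		aws.ToString(c.AccessKeyId),
		aws.ToString(c.SecretAccessKey),
		aws.ToString(c.SessionToken),
	)
}

func main() {
	c := &types.Credentials{
		AccessKeyId:     aws.String("ASIAEXAMPLE"),           // placeholder
		SecretAccessKey: aws.String("example-secret"),        // placeholder
		SessionToken:    aws.String("example-session-token"), // placeholder
	}
	fmt.Printf("%T\n", providerFrom(c))
}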
+// +// The message is encoded because the details of the authorization status can +// contain privileged information that the user who requested the operation should +// not see. To decode an authorization status message, a user must be granted +// permissions through an IAM [policy]to request the DecodeAuthorizationMessage ( +// sts:DecodeAuthorizationMessage ) action. +// +// The decoded message includes the following type of information: +// // - Whether the request was denied due to an explicit deny or due to the -// absence of an explicit allow. For more information, see Determining Whether a -// Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) -// in the IAM User Guide. +// absence of an explicit allow. For more information, see [Determining Whether a Request is Allowed or Denied]in the IAM User +// Guide. +// // - The principal who made the request. +// // - The requested action. +// // - The requested resource. +// // - The values of condition keys in the context of the user's request. +// +// [Determining Whether a Request is Allowed or Denied]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow +// [policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html func (c *Client) DecodeAuthorizationMessage(ctx context.Context, params *DecodeAuthorizationMessageInput, optFns ...func(*Options)) (*DecodeAuthorizationMessageOutput, error) { if params == nil { params = &DecodeAuthorizationMessageInput{} @@ -127,6 +138,12 @@ func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middle if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDecodeAuthorizationMessageValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go index 1f7cbcc2bbb..3ba00873db9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go @@ -10,23 +10,31 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the account identifier for the specified access key ID. Access keys -// consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE ) and -// a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY ). -// For more information about access keys, see Managing Access Keys for IAM Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) -// in the IAM User Guide. When you pass an access key ID to this operation, it -// returns the ID of the Amazon Web Services account to which the keys belong. -// Access key IDs beginning with AKIA are long-term credentials for an IAM user or -// the Amazon Web Services account root user. Access key IDs beginning with ASIA -// are temporary credentials that are created using STS operations. If the account -// in the response belongs to you, you can sign in as the root user and review your -// root user access keys. 
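// Sketch: decoding the encoded authorization message that accompanies some
// Client.UnauthorizedOperation (HTTP 403) responses, as described in the
// DecodeAuthorizationMessage documentation above. The caller needs the
// sts:DecodeAuthorizationMessage permission; the encoded message value and the
// default-config loading are placeholders/assumptions of this sketch.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	out, err := client.DecodeAuthorizationMessage(ctx, &sts.DecodeAuthorizationMessageInput{
		EncodedMessage: aws.String("<encoded-message-from-403-response>"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	// The decoded JSON explains whether the denial was explicit or implicit,
	// plus the principal, action, resource, and condition keys involved.
	fmt.Println(aws.ToString(out.DecodedMessage))
}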
Then, you can pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html) -// to learn which IAM user owns the keys. To learn who requested the temporary -// credentials for an ASIA access key, view the STS events in your CloudTrail logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html) -// in the IAM User Guide. This operation does not indicate the state of the access -// key. The key might be active, inactive, or deleted. Active keys might not have -// permissions to perform an operation. Providing a deleted access key might return -// an error that the key doesn't exist. +// Returns the account identifier for the specified access key ID. +// +// Access keys consist of two parts: an access key ID (for example, +// AKIAIOSFODNN7EXAMPLE ) and a secret access key (for example, +// wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY ). For more information about access +// keys, see [Managing Access Keys for IAM Users]in the IAM User Guide. +// +// When you pass an access key ID to this operation, it returns the ID of the +// Amazon Web Services account to which the keys belong. Access key IDs beginning +// with AKIA are long-term credentials for an IAM user or the Amazon Web Services +// account root user. Access key IDs beginning with ASIA are temporary credentials +// that are created using STS operations. If the account in the response belongs to +// you, you can sign in as the root user and review your root user access keys. +// Then, you can pull a [credentials report]to learn which IAM user owns the keys. To learn who +// requested the temporary credentials for an ASIA access key, view the STS events +// in your [CloudTrail logs]in the IAM User Guide. +// +// This operation does not indicate the state of the access key. The key might be +// active, inactive, or deleted. Active keys might not have permissions to perform +// an operation. Providing a deleted access key might return an error that the key +// doesn't exist. +// +// [credentials report]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html +// [CloudTrail logs]: https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html +// [Managing Access Keys for IAM Users]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html func (c *Client) GetAccessKeyInfo(ctx context.Context, params *GetAccessKeyInfoInput, optFns ...func(*Options)) (*GetAccessKeyInfoOutput, error) { if params == nil { params = &GetAccessKeyInfoInput{} @@ -44,9 +52,10 @@ func (c *Client) GetAccessKeyInfo(ctx context.Context, params *GetAccessKeyInfoI type GetAccessKeyInfoInput struct { - // The identifier of an access key. This parameter allows (through its regex - // pattern) a string of characters that can consist of any upper- or lowercase - // letter or digit. + // The identifier of an access key. + // + // This parameter allows (through its regex pattern) a string of characters that + // can consist of any upper- or lowercase letter or digit. // // This member is required. 
AccessKeyId *string @@ -120,6 +129,12 @@ func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpGetAccessKeyInfoValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go index acb7ede44fd..abac49ad2f8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go @@ -12,13 +12,15 @@ import ( ) // Returns details about the IAM user or role whose credentials are used to call -// the operation. No permissions are required to perform this operation. If an -// administrator attaches a policy to your identity that explicitly denies access -// to the sts:GetCallerIdentity action, you can still perform this operation. -// Permissions are not required because the same information is returned when -// access is denied. To view an example response, see I Am Not Authorized to -// Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) -// in the IAM User Guide. +// the operation. +// +// No permissions are required to perform this operation. If an administrator +// attaches a policy to your identity that explicitly denies access to the +// sts:GetCallerIdentity action, you can still perform this operation. Permissions +// are not required because the same information is returned when access is denied. +// To view an example response, see [I Am Not Authorized to Perform: iam:DeleteVirtualMFADevice]in the IAM User Guide. +// +// [I Am Not Authorized to Perform: iam:DeleteVirtualMFADevice]: https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa func (c *Client) GetCallerIdentity(ctx context.Context, params *GetCallerIdentityInput, optFns ...func(*Options)) (*GetCallerIdentityOutput, error) { if params == nil { params = &GetCallerIdentityInput{} @@ -38,8 +40,8 @@ type GetCallerIdentityInput struct { noSmithyDocumentSerde } -// Contains the response to a successful GetCallerIdentity request, including -// information about the entity making the request. +// Contains the response to a successful GetCallerIdentity request, including information about the +// entity making the request. type GetCallerIdentityOutput struct { // The Amazon Web Services account ID number of the account that owns or contains @@ -51,8 +53,10 @@ type GetCallerIdentityOutput struct { // The unique identifier of the calling entity. The exact value depends on the // type of entity that is making the call. The values returned are those listed in - // the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) - // found on the Policy Variables reference page in the IAM User Guide. + // the aws:userid column in the [Principal table]found on the Policy Variables reference page in + // the IAM User Guide. 
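// Sketch: looking up which account owns an access key ID, per the
// GetAccessKeyInfo documentation above (AKIA-prefixed IDs are long-term keys,
// ASIA-prefixed IDs are temporary ones). The key below is the documentation's
// example value; default-config loading is an assumption of this sketch.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	out, err := client.GetAccessKeyInfo(ctx, &sts.GetAccessKeyInfoInput{
		AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// A successful response does not imply the key is active or usable.
	fmt.Println("owning account:", aws.ToString(out.Account))
}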
+ // + // [Principal table]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable UserId *string // Metadata pertaining to the operation's result. @@ -116,6 +120,12 @@ func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stac if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetCallerIdentity(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go index 3679618cb5a..2bae67429f2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go @@ -14,74 +14,100 @@ import ( // Returns a set of temporary security credentials (consisting of an access key // ID, a secret access key, and a security token) for a user. A typical use is in a // proxy application that gets temporary security credentials on behalf of -// distributed applications inside a corporate network. You must call the -// GetFederationToken operation using the long-term security credentials of an IAM -// user. As a result, this call is appropriate in contexts where those credentials -// can be safeguarded, usually in a server-based application. For a comparison of -// GetFederationToken with the other API operations that produce temporary -// credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. Although it is possible to call GetFederationToken using -// the security credentials of an Amazon Web Services account root user rather than -// an IAM user that you create for the purpose of a proxy application, we do not -// recommend it. For more information, see Safeguard your root user credentials -// and don't use them for everyday tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials) -// in the IAM User Guide. You can create a mobile-based or browser-based app that -// can authenticate users using a web identity provider like Login with Amazon, -// Facebook, Google, or an OpenID Connect-compatible identity provider. In this -// case, we recommend that you use Amazon Cognito (http://aws.amazon.com/cognito/) -// or AssumeRoleWithWebIdentity . For more information, see Federation Through a -// Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) -// in the IAM User Guide. Session duration The temporary credentials are valid for -// the specified duration, from 900 seconds (15 minutes) up to a maximum of 129,600 -// seconds (36 hours). The default session duration is 43,200 seconds (12 hours). -// Temporary credentials obtained by using the root user credentials have a maximum -// duration of 3,600 seconds (1 hour). 
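// Sketch: GetCallerIdentity as documented above; it needs no extra IAM
// permissions and simply reports whose credentials are in use. Loading the
// default configuration chain is an assumption of this sketch.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	out, err := sts.NewFromConfig(cfg).GetCallerIdentity(ctx, &sts.GetCallerIdentityInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("account=%s arn=%s userId=%s\n",
		aws.ToString(out.Account), aws.ToString(out.Arn), aws.ToString(out.UserId))
}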
Permissions You can use the temporary -// credentials created by GetFederationToken in any Amazon Web Services service -// with the following exceptions: +// distributed applications inside a corporate network. +// +// You must call the GetFederationToken operation using the long-term security +// credentials of an IAM user. As a result, this call is appropriate in contexts +// where those credentials can be safeguarded, usually in a server-based +// application. For a comparison of GetFederationToken with the other API +// operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Comparing the Amazon Web Services STS API operations] in the IAM User Guide. +// +// Although it is possible to call GetFederationToken using the security +// credentials of an Amazon Web Services account root user rather than an IAM user +// that you create for the purpose of a proxy application, we do not recommend it. +// For more information, see [Safeguard your root user credentials and don't use them for everyday tasks]in the IAM User Guide. +// +// You can create a mobile-based or browser-based app that can authenticate users +// using a web identity provider like Login with Amazon, Facebook, Google, or an +// OpenID Connect-compatible identity provider. In this case, we recommend that you +// use [Amazon Cognito]or AssumeRoleWithWebIdentity . For more information, see [Federation Through a Web-based Identity Provider] in the IAM User +// Guide. +// +// # Session duration +// +// The temporary credentials are valid for the specified duration, from 900 +// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default +// session duration is 43,200 seconds (12 hours). Temporary credentials obtained by +// using the root user credentials have a maximum duration of 3,600 seconds (1 +// hour). +// +// # Permissions +// +// You can use the temporary credentials created by GetFederationToken in any +// Amazon Web Services service with the following exceptions: +// // - You cannot call any IAM operations using the CLI or the Amazon Web Services // API. This limitation does not apply to console sessions. +// // - You cannot call any STS operations except GetCallerIdentity . // -// You can use temporary credentials for single sign-on (SSO) to the console. You -// must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policy Amazon -// Resource Names (ARNs) to use as managed session policies. The plaintext that you -// use for both inline and managed session policies can't exceed 2,048 characters. +// You can use temporary credentials for single sign-on (SSO) to the console. +// +// You must pass an inline or managed [session policy] to this operation. You can pass a single +// JSON policy document to use as an inline session policy. You can also specify up +// to 10 managed policy Amazon Resource Names (ARNs) to use as managed session +// policies. The plaintext that you use for both inline and managed session +// policies can't exceed 2,048 characters. +// // Though the session policy parameters are optional, if you do not pass a policy, // then the resulting federated user session has no permissions. 
When you pass // session policies, the session permissions are the intersection of the IAM user // policies and the session policies that you pass. This gives you a way to further // restrict the permissions for a federated user. You cannot use session policies // to grant more permissions than those that are defined in the permissions policy -// of the IAM user. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. For information about using GetFederationToken to create -// temporary security credentials, see GetFederationToken—Federation Through a -// Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken) -// . You can use the credentials to access a resource that has a resource-based +// of the IAM user. For more information, see [Session Policies]in the IAM User Guide. For +// information about using GetFederationToken to create temporary security +// credentials, see [GetFederationToken—Federation Through a Custom Identity Broker]. +// +// You can use the credentials to access a resource that has a resource-based // policy. If that policy specifically references the federated user session in the // Principal element of the policy, the session has the permissions allowed by the // policy. These permissions are granted in addition to the permissions granted by -// the session policies. Tags (Optional) You can pass tag key-value pairs to your -// session. These are called session tags. For more information about session tags, -// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. You can create a mobile-based or browser-based app that -// can authenticate users using a web identity provider like Login with Amazon, -// Facebook, Google, or an OpenID Connect-compatible identity provider. In this -// case, we recommend that you use Amazon Cognito (http://aws.amazon.com/cognito/) -// or AssumeRoleWithWebIdentity . For more information, see Federation Through a -// Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) -// in the IAM User Guide. An administrator must grant you the permissions necessary -// to pass session tags. The administrator can also create granular permissions to -// allow you to pass only specific session tags. For more information, see -// Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. Tag key–value pairs are not case sensitive, but case is -// preserved. This means that you cannot have separate Department and department -// tag keys. Assume that the user that you are federating has the Department = -// Marketing tag and you pass the department = engineering session tag. Department -// and department are not saved as separate tags, and the session tag passed in -// the request takes precedence over the user tag. +// the session policies. +// +// # Tags +// +// (Optional) You can pass tag key-value pairs to your session. These are called +// session tags. For more information about session tags, see [Passing Session Tags in STS]in the IAM User +// Guide. 
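// Sketch: a proxy application requesting federated credentials with
// GetFederationToken, passing the required session policy discussed above and
// using long-term IAM user credentials resolved by the default config chain.
// The federated user name, duration, and policy document are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	// Without a session policy the federated session would have no permissions.
	policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:ListBucket","Resource":"*"}]}`

	out, err := client.GetFederationToken(ctx, &sts.GetFederationTokenInput{
		Name:            aws.String("Bob"),  // federated user name, as in the docs above
		DurationSeconds: aws.Int32(43200),   // 12 hours, the documented default
		Policy:          aws.String(policy),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("federated user:", aws.ToString(out.FederatedUser.Arn))
}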
+// +// You can create a mobile-based or browser-based app that can authenticate users +// using a web identity provider like Login with Amazon, Facebook, Google, or an +// OpenID Connect-compatible identity provider. In this case, we recommend that you +// use [Amazon Cognito]or AssumeRoleWithWebIdentity . For more information, see [Federation Through a Web-based Identity Provider] in the IAM User +// Guide. +// +// An administrator must grant you the permissions necessary to pass session tags. +// The administrator can also create granular permissions to allow you to pass only +// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide. +// +// Tag key–value pairs are not case sensitive, but case is preserved. This means +// that you cannot have separate Department and department tag keys. Assume that +// the user that you are federating has the Department = Marketing tag and you +// pass the department = engineering session tag. Department and department are +// not saved as separate tags, and the session tag passed in the request takes +// precedence over the user tag. +// +// [Federation Through a Web-based Identity Provider]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity +// [session policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [Amazon Cognito]: http://aws.amazon.com/cognito/ +// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html +// [GetFederationToken—Federation Through a Custom Identity Broker]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken +// [Comparing the Amazon Web Services STS API operations]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison +// [Safeguard your root user credentials and don't use them for everyday tasks]: https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials +// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html +// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html func (c *Client) GetFederationToken(ctx context.Context, params *GetFederationTokenInput, optFns ...func(*Options)) (*GetFederationTokenOutput, error) { if params == nil { params = &GetFederationTokenInput{} @@ -102,10 +128,11 @@ type GetFederationTokenInput struct { // The name of the federated user. The name is used as an identifier for the // temporary security credentials (such as Bob ). For example, you can reference // the federated user name in a resource-based policy, such as in an Amazon S3 - // bucket policy. The regex used to validate this parameter is a string of - // characters consisting of upper- and lower-case alphanumeric characters with no - // spaces. You can also include underscores or any of the following characters: - // =,.@- + // bucket policy. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. 
You can also + // include underscores or any of the following characters: =,.@- // // This member is required. Name *string @@ -119,99 +146,127 @@ type GetFederationTokenInput struct { DurationSeconds *int32 // An IAM policy in JSON format that you want to use as an inline session policy. - // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // to this operation. You can pass a single JSON policy document to use as an - // inline session policy. You can also specify up to 10 managed policy Amazon - // Resource Names (ARNs) to use as managed session policies. This parameter is - // optional. However, if you do not pass any session policies, then the resulting - // federated user session has no permissions. When you pass session policies, the - // session permissions are the intersection of the IAM user policies and the - // session policies that you pass. This gives you a way to further restrict the - // permissions for a federated user. You cannot use session policies to grant more - // permissions than those that are defined in the permissions policy of the IAM - // user. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. The resulting credentials can be used to access a - // resource that has a resource-based policy. If that policy specifically - // references the federated user session in the Principal element of the policy, - // the session has the permissions allowed by the policy. These permissions are - // granted in addition to the permissions that are granted by the session policies. + // + // You must pass an inline or managed [session policy] to this operation. You can pass a single + // JSON policy document to use as an inline session policy. You can also specify up + // to 10 managed policy Amazon Resource Names (ARNs) to use as managed session + // policies. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. + // + // When you pass session policies, the session permissions are the intersection of + // the IAM user policies and the session policies that you pass. This gives you a + // way to further restrict the permissions for a federated user. You cannot use + // session policies to grant more permissions than those that are defined in the + // permissions policy of the IAM user. For more information, see [Session Policies]in the IAM User + // Guide. + // + // The resulting credentials can be used to access a resource that has a + // resource-based policy. If that policy specifically references the federated user + // session in the Principal element of the policy, the session has the permissions + // allowed by the policy. These permissions are granted in addition to the + // permissions that are granted by the session policies. + // // The plaintext that you use for both inline and managed session policies can't // exceed 2,048 characters. The JSON policy characters can be any ASCII character // from the space character to the end of the valid character list (\u0020 through // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage - // return (\u000D) characters. An Amazon Web Services conversion compresses the - // passed inline session policy, managed policy ARNs, and session tags into a - // packed binary format that has a separate limit. 
Your request can fail for this - // limit even if your plaintext meets the other requirements. The PackedPolicySize - // response element indicates by percentage how close the policies and tags for - // your request are to the upper size limit. + // return (\u000D) characters. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // [session policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session Policy *string // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to // use as a managed session policy. The policies must exist in the same account as - // the IAM user that is requesting federated access. You must pass an inline or - // managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // to this operation. You can pass a single JSON policy document to use as an - // inline session policy. You can also specify up to 10 managed policy Amazon - // Resource Names (ARNs) to use as managed session policies. The plaintext that you - // use for both inline and managed session policies can't exceed 2,048 characters. - // You can provide up to 10 managed policy ARNs. For more information about ARNs, - // see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. This parameter is optional. - // However, if you do not pass any session policies, then the resulting federated - // user session has no permissions. When you pass session policies, the session - // permissions are the intersection of the IAM user policies and the session - // policies that you pass. This gives you a way to further restrict the permissions - // for a federated user. You cannot use session policies to grant more permissions - // than those that are defined in the permissions policy of the IAM user. For more - // information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. The resulting credentials can be used to access a - // resource that has a resource-based policy. If that policy specifically - // references the federated user session in the Principal element of the policy, - // the session has the permissions allowed by the policy. These permissions are - // granted in addition to the permissions that are granted by the session policies. + // the IAM user that is requesting federated access. + // + // You must pass an inline or managed [session policy] to this operation. You can pass a single + // JSON policy document to use as an inline session policy. You can also specify up + // to 10 managed policy Amazon Resource Names (ARNs) to use as managed session + // policies. The plaintext that you use for both inline and managed session + // policies can't exceed 2,048 characters. You can provide up to 10 managed policy + // ARNs. 
For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the Amazon Web Services General + // Reference. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. + // + // When you pass session policies, the session permissions are the intersection of + // the IAM user policies and the session policies that you pass. This gives you a + // way to further restrict the permissions for a federated user. You cannot use + // session policies to grant more permissions than those that are defined in the + // permissions policy of the IAM user. For more information, see [Session Policies]in the IAM User + // Guide. + // + // The resulting credentials can be used to access a resource that has a + // resource-based policy. If that policy specifically references the federated user + // session in the Principal element of the policy, the session has the permissions + // allowed by the policy. These permissions are granted in addition to the + // permissions that are granted by the session policies. + // // An Amazon Web Services conversion compresses the passed inline session policy, // managed policy ARNs, and session tags into a packed binary format that has a // separate limit. Your request can fail for this limit even if your plaintext // meets the other requirements. The PackedPolicySize response element indicates // by percentage how close the policies and tags for your request are to the upper // size limit. + // + // [session policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html PolicyArns []types.PolicyDescriptorType // A list of session tags. Each session tag consists of a key name and an - // associated value. For more information about session tags, see Passing Session - // Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) - // in the IAM User Guide. This parameter is optional. You can pass up to 50 session - // tags. The plaintext session tag keys can’t exceed 128 characters and the values - // can’t exceed 256 characters. For these and additional limits, see IAM and STS - // Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. An Amazon Web Services conversion compresses the passed - // inline session policy, managed policy ARNs, and session tags into a packed - // binary format that has a separate limit. Your request can fail for this limit - // even if your plaintext meets the other requirements. The PackedPolicySize - // response element indicates by percentage how close the policies and tags for - // your request are to the upper size limit. You can pass a session tag with the - // same key as a tag that is already attached to the user you are federating. When - // you do, session tags override a user tag with the same key. Tag key–value pairs - // are not case sensitive, but case is preserved. This means that you cannot have - // separate Department and department tag keys. Assume that the role has the - // Department = Marketing tag and you pass the department = engineering session - // tag. 
Department and department are not saved as separate tags, and the session - // tag passed in the request takes precedence over the role tag. + // associated value. For more information about session tags, see [Passing Session Tags in STS]in the IAM User + // Guide. + // + // This parameter is optional. You can pass up to 50 session tags. The plaintext + // session tag keys can’t exceed 128 characters and the values can’t exceed 256 + // characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User Guide. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // You can pass a session tag with the same key as a tag that is already attached + // to the user you are federating. When you do, session tags override a user tag + // with the same key. + // + // Tag key–value pairs are not case sensitive, but case is preserved. This means + // that you cannot have separate Department and department tag keys. Assume that + // the role has the Department = Marketing tag and you pass the department = + // engineering session tag. Department and department are not saved as separate + // tags, and the session tag passed in the request takes precedence over the role + // tag. + // + // [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html + // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length Tags []types.Tag noSmithyDocumentSerde } -// Contains the response to a successful GetFederationToken request, including -// temporary Amazon Web Services credentials that can be used to make Amazon Web -// Services requests. +// Contains the response to a successful GetFederationToken request, including temporary Amazon Web +// Services credentials that can be used to make Amazon Web Services requests. type GetFederationTokenOutput struct { // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. The size of the security token - // that STS API operations return is not fixed. We strongly recommend that you make - // no assumptions about the maximum size. + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. 
Credentials *types.Credentials // Identifiers for the federated user associated with the credentials (such as @@ -287,6 +342,12 @@ func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpGetFederationTokenValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go index 751fb147d4b..c73316a3c04 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go @@ -15,43 +15,58 @@ import ( // IAM user. The credentials consist of an access key ID, a secret access key, and // a security token. Typically, you use GetSessionToken if you want to use MFA to // protect programmatic calls to specific Amazon Web Services API operations like -// Amazon EC2 StopInstances . MFA-enabled IAM users must call GetSessionToken and -// submit an MFA code that is associated with their MFA device. Using the temporary -// security credentials that the call returns, IAM users can then make programmatic -// calls to API operations that require MFA authentication. An incorrect MFA code -// causes the API to return an access denied error. For a comparison of -// GetSessionToken with the other API operations that produce temporary -// credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. No permissions are required for users to perform this -// operation. The purpose of the sts:GetSessionToken operation is to authenticate -// the user using MFA. You cannot use policies to control authentication -// operations. For more information, see Permissions for GetSessionToken (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html) -// in the IAM User Guide. Session Duration The GetSessionToken operation must be -// called by using the long-term Amazon Web Services security credentials of an IAM -// user. Credentials that are created by IAM users are valid for the duration that -// you specify. This duration can range from 900 seconds (15 minutes) up to a -// maximum of 129,600 seconds (36 hours), with a default of 43,200 seconds (12 -// hours). Credentials based on account credentials can range from 900 seconds (15 -// minutes) up to 3,600 seconds (1 hour), with a default of 1 hour. Permissions The -// temporary security credentials created by GetSessionToken can be used to make -// API calls to any Amazon Web Services service with the following exceptions: +// Amazon EC2 StopInstances . +// +// MFA-enabled IAM users must call GetSessionToken and submit an MFA code that is +// associated with their MFA device. Using the temporary security credentials that +// the call returns, IAM users can then make programmatic calls to API operations +// that require MFA authentication. An incorrect MFA code causes the API to return +// an access denied error. 
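// Sketch: attaching session tags to a GetFederationToken request, illustrating
// the tag-key case behavior described above (a "department" session tag
// overrides a "Department" user tag rather than creating a second key). The
// federated user name, managed policy ARN, and tag values are placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
	"github.com/aws/aws-sdk-go-v2/service/sts/types"
)

func main() {
	in := &sts.GetFederationTokenInput{
		Name: aws.String("Bob"), // placeholder federated user name
		PolicyArns: []types.PolicyDescriptorType{
			{Arn: aws.String("arn:aws:iam::aws:policy/ReadOnlyAccess")}, // placeholder
		},
		Tags: []types.Tag{
			// Session tag keys are case-insensitive but case-preserving.
			{Key: aws.String("department"), Value: aws.String("engineering")},
			{Key: aws.String("project"), Value: aws.String("analytics")},
		},
	}
	fmt.Printf("federation request for %s with %d session tags\n",
		aws.ToString(in.Name), len(in.Tags))
}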
For a comparison of GetSessionToken with the other API +// operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Comparing the Amazon Web Services STS API operations] in the IAM User Guide. +// +// No permissions are required for users to perform this operation. The purpose of +// the sts:GetSessionToken operation is to authenticate the user using MFA. You +// cannot use policies to control authentication operations. For more information, +// see [Permissions for GetSessionToken]in the IAM User Guide. +// +// # Session Duration +// +// The GetSessionToken operation must be called by using the long-term Amazon Web +// Services security credentials of an IAM user. Credentials that are created by +// IAM users are valid for the duration that you specify. This duration can range +// from 900 seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours), +// with a default of 43,200 seconds (12 hours). Credentials based on account +// credentials can range from 900 seconds (15 minutes) up to 3,600 seconds (1 +// hour), with a default of 1 hour. +// +// # Permissions +// +// The temporary security credentials created by GetSessionToken can be used to +// make API calls to any Amazon Web Services service with the following exceptions: +// // - You cannot call any IAM API operations unless MFA authentication // information is included in the request. +// // - You cannot call any STS API except AssumeRole or GetCallerIdentity . // // The credentials that GetSessionToken returns are based on permissions // associated with the IAM user whose credentials were used to call the operation. -// The temporary credentials have the same permissions as the IAM user. Although it -// is possible to call GetSessionToken using the security credentials of an Amazon -// Web Services account root user rather than an IAM user, we do not recommend it. -// If GetSessionToken is called using root user credentials, the temporary -// credentials have root user permissions. For more information, see Safeguard -// your root user credentials and don't use them for everyday tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials) -// in the IAM User Guide For more information about using GetSessionToken to -// create temporary credentials, see Temporary Credentials for Users in Untrusted -// Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) -// in the IAM User Guide. +// The temporary credentials have the same permissions as the IAM user. +// +// Although it is possible to call GetSessionToken using the security credentials +// of an Amazon Web Services account root user rather than an IAM user, we do not +// recommend it. If GetSessionToken is called using root user credentials, the +// temporary credentials have root user permissions. For more information, see [Safeguard your root user credentials and don't use them for everyday tasks]in +// the IAM User Guide +// +// For more information about using GetSessionToken to create temporary +// credentials, see [Temporary Credentials for Users in Untrusted Environments]in the IAM User Guide. 
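// Sketch: a plain GetSessionToken call by an IAM user, per the documentation
// above, with the 12-hour default duration made explicit. The default config
// chain resolving the user's long-term credentials is an assumption here.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	out, err := sts.NewFromConfig(cfg).GetSessionToken(ctx, &sts.GetSessionTokenInput{
		DurationSeconds: aws.Int32(43200),
	})
	if err != nil {
		log.Fatal(err)
	}
	// The returned session token's size is not fixed; treat it as opaque.
	fmt.Println("temporary key:", aws.ToString(out.Credentials.AccessKeyId))
}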
+// +// [Permissions for GetSessionToken]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html +// [Comparing the Amazon Web Services STS API operations]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison +// [Temporary Credentials for Users in Untrusted Environments]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken +// [Safeguard your root user credentials and don't use them for everyday tasks]: https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials +// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html func (c *Client) GetSessionToken(ctx context.Context, params *GetSessionTokenInput, optFns ...func(*Options)) (*GetSessionTokenOutput, error) { if params == nil { params = &GetSessionTokenInput{} @@ -83,10 +98,11 @@ type GetSessionTokenInput struct { // number for a hardware device (such as GAHT12345678 ) or an Amazon Resource Name // (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user ). You // can find the device for an IAM user by going to the Amazon Web Services - // Management Console and viewing the user's security credentials. The regex used - // to validate this parameter is a string of characters consisting of upper- and - // lower-case alphanumeric characters with no spaces. You can also include - // underscores or any of the following characters: =,.@:/- + // Management Console and viewing the user's security credentials. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@:/- SerialNumber *string // The value provided by the MFA device, if MFA is required. If any policy @@ -94,22 +110,24 @@ type GetSessionTokenInput struct { // authentication is required, the user must provide a code when requesting a set // of temporary security credentials. A user who fails to provide the code receives // an "access denied" response when requesting resources that require MFA - // authentication. The format for this parameter, as described by its regex - // pattern, is a sequence of six numeric digits. + // authentication. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. TokenCode *string noSmithyDocumentSerde } -// Contains the response to a successful GetSessionToken request, including -// temporary Amazon Web Services credentials that can be used to make Amazon Web -// Services requests. +// Contains the response to a successful GetSessionToken request, including temporary Amazon Web +// Services credentials that can be used to make Amazon Web Services requests. type GetSessionTokenOutput struct { // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. The size of the security token - // that STS API operations return is not fixed. We strongly recommend that you make - // no assumptions about the maximum size. + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. 
Credentials *types.Credentials // Metadata pertaining to the operation's result. @@ -173,6 +191,12 @@ func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetSessionToken(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go index 9db5bfd4348..e842a7f7e8e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go @@ -12,7 +12,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -func bindAuthParamsRegion(params *AuthResolverParameters, _ interface{}, options Options) { +func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { params.Region = options.Region } @@ -90,12 +90,12 @@ type AuthResolverParameters struct { Region string } -func bindAuthResolverParams(operation string, input interface{}, options Options) *AuthResolverParameters { +func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { params := &AuthResolverParameters{ Operation: operation, } - bindAuthParamsRegion(params, input, options) + bindAuthParamsRegion(ctx, params, input, options) return params } @@ -157,7 +157,7 @@ func (*resolveAuthSchemeMiddleware) ID() string { func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - params := bindAuthResolverParams(m.operation, getOperationInput(ctx), m.options) + params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) if err != nil { return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go index 5d634ce35c8..7e4346ec9fa 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go @@ -20,8 +20,17 @@ import ( "io" "strconv" "strings" + "time" ) +func deserializeS3Expires(v string) (*time.Time, error) { + t, err := smithytime.ParseHTTPDate(v) + if err != nil { + return nil, nil + } + return &t, nil +} + type awsAwsquery_deserializeOpAssumeRole struct { } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go index d963fd8d19a..cbb19c7f668 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go @@ -3,9 +3,11 @@ // Package sts provides the API client, operations, and parameter types for AWS // Security Token Service. // -// Security Token Service Security Token Service (STS) enables you to request -// temporary, limited-privilege credentials for users. This guide provides -// descriptions of the STS API. 
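As a usage sketch for the GetSessionToken operation documented above, the snippet below requests MFA-backed temporary credentials with aws-sdk-go-v2. The MFA serial number, token code, and duration are placeholders and error handling is abbreviated; this only illustrates the input and output fields described in the doc comments, not any KEDA-specific code.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.TODO()
	cfg, err := config.LoadDefaultConfig(ctx) // long-term IAM user credentials
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	out, err := client.GetSessionToken(ctx, &sts.GetSessionTokenInput{
		DurationSeconds: aws.Int32(3600),                                  // 15 min to 36 h for IAM users
		SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/user"), // placeholder MFA device ARN
		TokenCode:       aws.String("123456"),                             // six-digit code from the device
	})
	if err != nil {
		log.Fatal(err)
	}
	// The returned Credentials carry an access key ID, secret access key,
	// session token, and expiration time.
	fmt.Println(*out.Credentials.AccessKeyId, out.Credentials.Expiration)
}
```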
For more information about using this service, see -// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html) -// . +// # Security Token Service +// +// Security Token Service (STS) enables you to request temporary, +// limited-privilege credentials for users. This guide provides descriptions of the +// STS API. For more information about using this service, see [Temporary Security Credentials]. +// +// [Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html package sts diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go index 32e2d5435f4..35305d8976f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go @@ -306,6 +306,17 @@ func (p EndpointParameters) WithDefaults() EndpointParameters { return p } +type stringSlice []string + +func (s stringSlice) Get(i int) *string { + if i < 0 || i >= len(s) { + return nil + } + + v := s[i] + return &v +} + // EndpointResolverV2 provides the interface for resolving service endpoints. type EndpointResolverV2 interface { // ResolveEndpoint attempts to resolve the endpoint with the provided options, @@ -1045,7 +1056,7 @@ type endpointParamsBinder interface { bindEndpointParams(*EndpointParameters) } -func bindEndpointParams(input interface{}, options Options) *EndpointParameters { +func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { params := &EndpointParameters{} params.Region = bindRegion(options.Region) @@ -1075,6 +1086,10 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return next.HandleFinalize(ctx, in) } + if err := checkAccountID(getIdentity(ctx), m.options.AccountIDEndpointMode); err != nil { + return out, metadata, fmt.Errorf("invalid accountID set: %w", err) + } + req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) @@ -1084,7 +1099,7 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") } - params := bindEndpointParams(getOperationInput(ctx), m.options) + params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go index 6e0f31d271c..84e221f2b96 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go @@ -3,4 +3,4 @@ package sts // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.28.6" +const goModuleVersion = "1.30.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go index 5c1be79f8c0..a9a35881aff 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go @@ -24,6 +24,9 @@ type Options struct { // modify this list for per operation behavior. 
APIOptions []func(*middleware.Stack) error + // Indicates how aws account ID is applied in endpoint2.0 routing + AccountIDEndpointMode aws.AccountIDEndpointMode + // The optional application specific identifier appended to the User-Agent header. AppID string @@ -50,8 +53,10 @@ type Options struct { // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a // value for this field will likely prevent you from using any endpoint-related // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom - // endpoint, set the client option BaseEndpoint instead. + // BaseEndpoint. + // + // To migrate an EndpointResolver implementation that uses a custom endpoint, set + // the client option BaseEndpoint instead. EndpointResolver EndpointResolver // Resolves the endpoint used for a particular service operation. This should be @@ -70,17 +75,20 @@ type Options struct { // RetryMaxAttempts specifies the maximum number attempts an API client will call // an operation that fails with a retryable error. A value of 0 is ignored, and // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. If specified in an operation call's - // functional options with a value that is different than the constructed client's - // Options, the Client's Retryer will be wrapped to use the operation's specific - // RetryMaxAttempts value. + // per operation call's retry max attempts. + // + // If specified in an operation call's functional options with a value that is + // different than the constructed client's Options, the Client's Retryer will be + // wrapped to use the operation's specific RetryMaxAttempts value. RetryMaxAttempts int // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. Currently does not support per operation call - // overrides, may in the future. + // Retryer option is not also specified. + // + // When creating a new API Clients this member will only be used if the Retryer + // Options member is nil. This value will be ignored if Retryer is not nil. + // + // Currently does not support per operation call overrides, may in the future. RetryMode aws.RetryMode // Retryer guides how HTTP requests should be retried in case of recoverable @@ -97,8 +105,9 @@ type Options struct { // The initial DefaultsMode used when the client options were constructed. If the // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. Currently does not support per operation call - // overrides, may in the future. + // value was at that point in time. + // + // Currently does not support per operation call overrides, may in the future. resolvedDefaultsMode aws.DefaultsMode // The HTTP client to invoke API calls with. Defaults to client's default HTTP @@ -143,6 +152,7 @@ func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { // Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for // this field will likely prevent you from using any endpoint-related service // features released after the introduction of EndpointResolverV2 and BaseEndpoint. 
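The Options fields documented above are normally set through functional options when the client is constructed. A minimal sketch, assuming the same aws-sdk-go-v2 modules vendored here; the helper name and the chosen values are illustrative:

```go
import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func newSTSClient(ctx context.Context) (*sts.Client, error) {
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		return nil, err
	}
	// Per-client overrides of the Options fields described above.
	return sts.NewFromConfig(cfg, func(o *sts.Options) {
		o.AppID = "my-app"                  // appended to the User-Agent header
		o.RetryMaxAttempts = 5              // wraps the retryer for this client only
		o.RetryMode = aws.RetryModeAdaptive // ignored if a custom Retryer is set
	}), nil
}
```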
+// // To migrate an EndpointResolver implementation that uses a custom endpoint, set // the client option BaseEndpoint instead. func WithEndpointResolver(v EndpointResolver) func(*Options) { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go index 097875b279b..9573a4b6461 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go @@ -65,9 +65,10 @@ func (e *IDPCommunicationErrorException) ErrorCode() string { func (e *IDPCommunicationErrorException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // The identity provider (IdP) reported that authentication failed. This might be -// because the claim is invalid. If this error is returned for the -// AssumeRoleWithWebIdentity operation, it can also mean that the claim has expired -// or has been explicitly revoked. +// because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it can +// also mean that the claim has expired or has been explicitly revoked. type IDPRejectedClaimException struct { Message *string @@ -183,11 +184,13 @@ func (e *MalformedPolicyDocumentException) ErrorFault() smithy.ErrorFault { retu // compresses the session policy document, session policy ARNs, and session tags // into a packed binary format that has a separate limit. The error message // indicates by percentage how close the policies and tags are to the upper size -// limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. You could receive this error even though you meet other -// defined session policy and session tag limits. For more information, see IAM -// and STS Entity Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) -// in the IAM User Guide. +// limit. For more information, see [Passing Session Tags in STS]in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see [IAM and STS Entity Character Limits]in the IAM User Guide. +// +// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html +// [IAM and STS Entity Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length type PackedPolicyTooLargeException struct { Message *string @@ -215,9 +218,10 @@ func (e *PackedPolicyTooLargeException) ErrorFault() smithy.ErrorFault { return // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating Amazon Web Services STS in an Amazon Web Services Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. +// console to activate STS in that region. For more information, see [Activating and Deactivating Amazon Web Services STS in an Amazon Web Services Region]in the IAM +// User Guide. 
+// +// [Activating and Deactivating Amazon Web Services STS in an Amazon Web Services Region]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html type RegionDisabledException struct { Message *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go index e3701d11d15..dff7a3c2e76 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go @@ -11,10 +11,11 @@ import ( // returns. type AssumedRoleUser struct { - // The ARN of the temporary security credentials that are returned from the - // AssumeRole action. For more information about ARNs and how to use them in - // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) - // in the IAM User Guide. + // The ARN of the temporary security credentials that are returned from the AssumeRole + // action. For more information about ARNs and how to use them in policies, see [IAM Identifiers]in + // the IAM User Guide. + // + // [IAM Identifiers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html // // This member is required. Arn *string @@ -61,8 +62,9 @@ type FederatedUser struct { // The ARN that specifies the federated user that is associated with the // credentials. For more information about ARNs and how to use them in policies, - // see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) - // in the IAM User Guide. + // see [IAM Identifiers]in the IAM User Guide. + // + // [IAM Identifiers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html // // This member is required. Arn *string @@ -81,9 +83,10 @@ type FederatedUser struct { type PolicyDescriptorType struct { // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session - // policy for the role. For more information about ARNs, see Amazon Resource Names - // (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. + // policy for the role. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the Amazon Web + // Services General Reference. + // + // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html Arn *string noSmithyDocumentSerde @@ -107,23 +110,30 @@ type ProvidedContext struct { // You can pass custom key-value pair attributes when you assume a role or // federate a user. These are called session tags. You can then use the session -// tags to control access to resources. For more information, see Tagging Amazon -// Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. +// tags to control access to resources. For more information, see [Tagging Amazon Web Services STS Sessions]in the IAM User +// Guide. +// +// [Tagging Amazon Web Services STS Sessions]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html type Tag struct { - // The key for a session tag. You can pass up to 50 session tags. The plain text - // session tag keys can’t exceed 128 characters. 
For these and additional limits, - // see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. + // The key for a session tag. + // + // You can pass up to 50 session tags. The plain text session tag keys can’t + // exceed 128 characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User + // Guide. + // + // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length // // This member is required. Key *string - // The value for a session tag. You can pass up to 50 session tags. The plain text - // session tag values can’t exceed 256 characters. For these and additional limits, - // see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. + // The value for a session tag. + // + // You can pass up to 50 session tags. The plain text session tag values can’t + // exceed 256 characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User + // Guide. + // + // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length // // This member is required. Value *string diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md index 39ffae99938..bdbc7b43650 100644 --- a/vendor/github.com/aws/smithy-go/CHANGELOG.md +++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md @@ -1,3 +1,9 @@ +# Release (2024-06-27) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.20.3 + * **Bug Fix**: Fix encoding/cbor test overflow on x86. + # Release (2024-03-29) * No change notes available for this release. diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go index a6b22f353d3..f82b767255a 100644 --- a/vendor/github.com/aws/smithy-go/go_module_metadata.go +++ b/vendor/github.com/aws/smithy-go/go_module_metadata.go @@ -3,4 +3,4 @@ package smithy // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.20.2" +const goModuleVersion = "1.20.3" diff --git a/vendor/github.com/bradleyfalzon/ghinstallation/v2/appsTransport.go b/vendor/github.com/bradleyfalzon/ghinstallation/v2/appsTransport.go index a67510c1c6d..ada64bcf331 100644 --- a/vendor/github.com/bradleyfalzon/ghinstallation/v2/appsTransport.go +++ b/vendor/github.com/bradleyfalzon/ghinstallation/v2/appsTransport.go @@ -4,8 +4,8 @@ import ( "crypto/rsa" "errors" "fmt" - "io/ioutil" "net/http" + "os" "strconv" "time" @@ -30,7 +30,7 @@ type AppsTransport struct { // NewAppsTransportKeyFromFile returns a AppsTransport using a private key from file. 
func NewAppsTransportKeyFromFile(tr http.RoundTripper, appID int64, privateKeyFile string) (*AppsTransport, error) { - privateKey, err := ioutil.ReadFile(privateKeyFile) + privateKey, err := os.ReadFile(privateKeyFile) if err != nil { return nil, fmt.Errorf("could not read private key: %s", err) } diff --git a/vendor/github.com/bradleyfalzon/ghinstallation/v2/transport.go b/vendor/github.com/bradleyfalzon/ghinstallation/v2/transport.go index 0b33da4ae64..5518005dc82 100644 --- a/vendor/github.com/bradleyfalzon/ghinstallation/v2/transport.go +++ b/vendor/github.com/bradleyfalzon/ghinstallation/v2/transport.go @@ -7,13 +7,13 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net/http" + "os" "strings" "sync" "time" - "github.com/google/go-github/v60/github" + "github.com/google/go-github/v62/github" ) const ( @@ -73,7 +73,7 @@ var _ http.RoundTripper = &Transport{} // NewKeyFromFile returns a Transport using a private key from file. func NewKeyFromFile(tr http.RoundTripper, appID, installationID int64, privateKeyFile string) (*Transport, error) { - privateKey, err := ioutil.ReadFile(privateKeyFile) + privateKey, err := os.ReadFile(privateKeyFile) if err != nil { return nil, fmt.Errorf("could not read private key: %s", err) } diff --git a/vendor/github.com/expr-lang/expr/README.md b/vendor/github.com/expr-lang/expr/README.md index 1a2d7dc8397..6c56c67b675 100644 --- a/vendor/github.com/expr-lang/expr/README.md +++ b/vendor/github.com/expr-lang/expr/README.md @@ -163,6 +163,8 @@ func main() { * [Akvorado](https://github.com/akvorado/akvorado) utilizes Expr to classify exporters and interfaces in network flows. * [keda.sh](https://keda.sh) uses Expr to allow customization of its Kubernetes-based event-driven autoscaling. * [Span Digital](https://spandigital.com/) uses Expr in it's Knowledge Management products. +* [Xiaohongshu](https://www.xiaohongshu.com/) combining yaml with Expr for dynamically policies delivery. +* [Melrōse](https://melrōse.org) uses Expr to implement its music programming language. 
[Add your company too](https://github.com/expr-lang/expr/edit/master/README.md) diff --git a/vendor/github.com/expr-lang/expr/ast/print.go b/vendor/github.com/expr-lang/expr/ast/print.go index 394e0c1ad62..6a7d698a99f 100644 --- a/vendor/github.com/expr-lang/expr/ast/print.go +++ b/vendor/github.com/expr-lang/expr/ast/print.go @@ -215,7 +215,7 @@ func (n *PairNode) String() string { if utils.IsValidIdentifier(str.Value) { return fmt.Sprintf("%s: %s", str.Value, n.Value.String()) } - return fmt.Sprintf("%q: %s", str.String(), n.Value.String()) + return fmt.Sprintf("%s: %s", str.String(), n.Value.String()) } return fmt.Sprintf("(%s): %s", n.Key.String(), n.Value.String()) } diff --git a/vendor/github.com/expr-lang/expr/ast/visitor.go b/vendor/github.com/expr-lang/expr/ast/visitor.go index 287a7558967..90bc9f1d0ee 100644 --- a/vendor/github.com/expr-lang/expr/ast/visitor.go +++ b/vendor/github.com/expr-lang/expr/ast/visitor.go @@ -7,6 +7,9 @@ type Visitor interface { } func Walk(node *Node, v Visitor) { + if *node == nil { + return + } switch n := (*node).(type) { case *NilNode: case *IdentifierNode: diff --git a/vendor/github.com/expr-lang/expr/builtin/builtin.go b/vendor/github.com/expr-lang/expr/builtin/builtin.go index efc01fc2f4e..cc6f197cdf2 100644 --- a/vendor/github.com/expr-lang/expr/builtin/builtin.go +++ b/vendor/github.com/expr-lang/expr/builtin/builtin.go @@ -472,9 +472,27 @@ var Builtins = []*Function{ { Name: "now", Func: func(args ...any) (any, error) { - return time.Now(), nil + if len(args) == 0 { + return time.Now(), nil + } + if len(args) == 1 { + if tz, ok := args[0].(*time.Location); ok { + return time.Now().In(tz), nil + } + } + return nil, fmt.Errorf("invalid number of arguments (expected 0, got %d)", len(args)) + }, + Validate: func(args []reflect.Type) (reflect.Type, error) { + if len(args) == 0 { + return timeType, nil + } + if len(args) == 1 { + if args[0] != nil && args[0].AssignableTo(locationType) { + return timeType, nil + } + } + return anyType, fmt.Errorf("invalid number of arguments (expected 0, got %d)", len(args)) }, - Types: types(new(func() time.Time)), }, { Name: "duration", @@ -486,9 +504,17 @@ var Builtins = []*Function{ { Name: "date", Func: func(args ...any) (any, error) { + tz, ok := args[0].(*time.Location) + if ok { + args = args[1:] + } + date := args[0].(string) if len(args) == 2 { layout := args[1].(string) + if tz != nil { + return time.ParseInLocation(layout, date, tz) + } return time.Parse(layout, date) } if len(args) == 3 { @@ -515,18 +541,43 @@ var Builtins = []*Function{ time.RFC1123, } for _, layout := range layouts { - t, err := time.Parse(layout, date) - if err == nil { - return t, nil + if tz == nil { + t, err := time.Parse(layout, date) + if err == nil { + return t, nil + } + } else { + t, err := time.ParseInLocation(layout, date, tz) + if err == nil { + return t, nil + } } } return nil, fmt.Errorf("invalid date %s", date) }, - Types: types( - new(func(string) time.Time), - new(func(string, string) time.Time), - new(func(string, string, string) time.Time), - ), + Validate: func(args []reflect.Type) (reflect.Type, error) { + if len(args) < 1 { + return anyType, fmt.Errorf("invalid number of arguments (expected at least 1, got %d)", len(args)) + } + if args[0] != nil && args[0].AssignableTo(locationType) { + args = args[1:] + } + if len(args) > 3 { + return anyType, fmt.Errorf("invalid number of arguments (expected at most 3, got %d)", len(args)) + } + return timeType, nil + }, + }, + { + Name: "timezone", + Func: func(args ...any) 
(any, error) { + tz, err := time.LoadLocation(args[0].(string)) + if err != nil { + return nil, err + } + return tz, nil + }, + Types: types(time.LoadLocation), }, { Name: "first", diff --git a/vendor/github.com/expr-lang/expr/builtin/utils.go b/vendor/github.com/expr-lang/expr/builtin/utils.go index 7d3b6ee8e70..29a95731a05 100644 --- a/vendor/github.com/expr-lang/expr/builtin/utils.go +++ b/vendor/github.com/expr-lang/expr/builtin/utils.go @@ -3,14 +3,17 @@ package builtin import ( "fmt" "reflect" + "time" ) var ( - anyType = reflect.TypeOf(new(any)).Elem() - integerType = reflect.TypeOf(0) - floatType = reflect.TypeOf(float64(0)) - arrayType = reflect.TypeOf([]any{}) - mapType = reflect.TypeOf(map[any]any{}) + anyType = reflect.TypeOf(new(any)).Elem() + integerType = reflect.TypeOf(0) + floatType = reflect.TypeOf(float64(0)) + arrayType = reflect.TypeOf([]any{}) + mapType = reflect.TypeOf(map[any]any{}) + timeType = reflect.TypeOf(new(time.Time)).Elem() + locationType = reflect.TypeOf(new(time.Location)) ) func kind(t reflect.Type) reflect.Kind { diff --git a/vendor/github.com/expr-lang/expr/checker/checker.go b/vendor/github.com/expr-lang/expr/checker/checker.go index ecf7a04d6e5..c71a98f07e7 100644 --- a/vendor/github.com/expr-lang/expr/checker/checker.go +++ b/vendor/github.com/expr-lang/expr/checker/checker.go @@ -13,6 +13,45 @@ import ( "github.com/expr-lang/expr/parser" ) +// ParseCheck parses input expression and checks its types. Also, it applies +// all provided patchers. In case of error, it returns error with a tree. +func ParseCheck(input string, config *conf.Config) (*parser.Tree, error) { + tree, err := parser.ParseWithConfig(input, config) + if err != nil { + return tree, err + } + + if len(config.Visitors) > 0 { + for i := 0; i < 1000; i++ { + more := false + for _, v := range config.Visitors { + // We need to perform types check, because some visitors may rely on + // types information available in the tree. + _, _ = Check(tree, config) + + ast.Walk(&tree.Node, v) + + if v, ok := v.(interface { + ShouldRepeat() bool + }); ok { + more = more || v.ShouldRepeat() + } + } + if !more { + break + } + } + } + _, err = Check(tree, config) + if err != nil { + return tree, err + } + + return tree, nil +} + +// Check checks types of the expression tree. It returns type of the expression +// and error if any. If config is nil, then default configuration will be used. func Check(tree *parser.Tree, config *conf.Config) (t reflect.Type, err error) { if config == nil { config = conf.New(nil) @@ -1005,7 +1044,7 @@ func (v *checker) checkArguments( continue } - if !t.AssignableTo(in) && kind(t) != reflect.Interface { + if !(t.AssignableTo(in) || deref.Type(t).AssignableTo(in)) && kind(t) != reflect.Interface { return anyType, &file.Error{ Location: arg.Location(), Message: fmt.Sprintf("cannot use %v as argument (type %v) to call %v ", t, in, name), @@ -1039,9 +1078,11 @@ func traverseAndReplaceIntegerNodesWithIntegerNodes(node *ast.Node, newType refl case *ast.IntegerNode: (*node).SetType(newType) case *ast.UnaryNode: + (*node).SetType(newType) unaryNode := (*node).(*ast.UnaryNode) traverseAndReplaceIntegerNodesWithIntegerNodes(&unaryNode.Node, newType) case *ast.BinaryNode: + // TODO: Binary node return type is dependent on the type of the operands. We can't just change the type of the node. 
binaryNode := (*node).(*ast.BinaryNode) switch binaryNode.Operator { case "+", "-", "*": diff --git a/vendor/github.com/expr-lang/expr/compiler/compiler.go b/vendor/github.com/expr-lang/expr/compiler/compiler.go index 1aa5ce188b9..720f6a26528 100644 --- a/vendor/github.com/expr-lang/expr/compiler/compiler.go +++ b/vendor/github.com/expr-lang/expr/compiler/compiler.go @@ -2,6 +2,7 @@ package compiler import ( "fmt" + "math" "reflect" "regexp" @@ -329,22 +330,46 @@ func (c *compiler) IntegerNode(node *ast.IntegerNode) { case reflect.Int: c.emitPush(node.Value) case reflect.Int8: + if node.Value > math.MaxInt8 || node.Value < math.MinInt8 { + panic(fmt.Sprintf("constant %d overflows int8", node.Value)) + } c.emitPush(int8(node.Value)) case reflect.Int16: + if node.Value > math.MaxInt16 || node.Value < math.MinInt16 { + panic(fmt.Sprintf("constant %d overflows int16", node.Value)) + } c.emitPush(int16(node.Value)) case reflect.Int32: + if node.Value > math.MaxInt32 || node.Value < math.MinInt32 { + panic(fmt.Sprintf("constant %d overflows int32", node.Value)) + } c.emitPush(int32(node.Value)) case reflect.Int64: c.emitPush(int64(node.Value)) case reflect.Uint: + if node.Value < 0 { + panic(fmt.Sprintf("constant %d overflows uint", node.Value)) + } c.emitPush(uint(node.Value)) case reflect.Uint8: + if node.Value > math.MaxUint8 || node.Value < 0 { + panic(fmt.Sprintf("constant %d overflows uint8", node.Value)) + } c.emitPush(uint8(node.Value)) case reflect.Uint16: + if node.Value > math.MaxUint16 || node.Value < 0 { + panic(fmt.Sprintf("constant %d overflows uint16", node.Value)) + } c.emitPush(uint16(node.Value)) case reflect.Uint32: + if node.Value < 0 { + panic(fmt.Sprintf("constant %d overflows uint32", node.Value)) + } c.emitPush(uint32(node.Value)) case reflect.Uint64: + if node.Value < 0 { + panic(fmt.Sprintf("constant %d overflows uint64", node.Value)) + } c.emitPush(uint64(node.Value)) default: c.emitPush(node.Value) @@ -566,8 +591,8 @@ func (c *compiler) BinaryNode(node *ast.BinaryNode) { } func (c *compiler) equalBinaryNode(node *ast.BinaryNode) { - l := kind(node.Left) - r := kind(node.Right) + l := kind(node.Left.Type()) + r := kind(node.Right.Type()) leftIsSimple := isSimpleType(node.Left) rightIsSimple := isSimpleType(node.Right) @@ -701,9 +726,44 @@ func (c *compiler) SliceNode(node *ast.SliceNode) { } func (c *compiler) CallNode(node *ast.CallNode) { - for _, arg := range node.Arguments { - c.compile(arg) + fn := node.Callee.Type() + if kind(fn) == reflect.Func { + fnInOffset := 0 + fnNumIn := fn.NumIn() + switch callee := node.Callee.(type) { + case *ast.MemberNode: + if prop, ok := callee.Property.(*ast.StringNode); ok { + if _, ok = callee.Node.Type().MethodByName(prop.Value); ok && callee.Node.Type().Kind() != reflect.Interface { + fnInOffset = 1 + fnNumIn-- + } + } + case *ast.IdentifierNode: + if t, ok := c.config.Types[callee.Value]; ok && t.Method { + fnInOffset = 1 + fnNumIn-- + } + } + for i, arg := range node.Arguments { + c.compile(arg) + if k := kind(arg.Type()); k == reflect.Ptr || k == reflect.Interface { + var in reflect.Type + if fn.IsVariadic() && i >= fnNumIn-1 { + in = fn.In(fn.NumIn() - 1).Elem() + } else { + in = fn.In(i + fnInOffset) + } + if k = kind(in); k != reflect.Ptr && k != reflect.Interface { + c.emit(OpDeref) + } + } + } + } else { + for _, arg := range node.Arguments { + c.compile(arg) + } } + if ident, ok := node.Callee.(*ast.IdentifierNode); ok { if c.config != nil { if fn, ok := c.config.Functions[ident.Value]; ok { @@ -1136,7 +1196,7 @@ func 
(c *compiler) PairNode(node *ast.PairNode) { } func (c *compiler) derefInNeeded(node ast.Node) { - switch kind(node) { + switch kind(node.Type()) { case reflect.Ptr, reflect.Interface: c.emit(OpDeref) } @@ -1155,8 +1215,7 @@ func (c *compiler) optimize() { } } -func kind(node ast.Node) reflect.Kind { - t := node.Type() +func kind(t reflect.Type) reflect.Kind { if t == nil { return reflect.Invalid } diff --git a/vendor/github.com/expr-lang/expr/expr.go b/vendor/github.com/expr-lang/expr/expr.go index ba786c0174c..8c619e1c4db 100644 --- a/vendor/github.com/expr-lang/expr/expr.go +++ b/vendor/github.com/expr-lang/expr/expr.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "reflect" + "time" "github.com/expr-lang/expr/ast" "github.com/expr-lang/expr/builtin" @@ -12,7 +13,6 @@ import ( "github.com/expr-lang/expr/conf" "github.com/expr-lang/expr/file" "github.com/expr-lang/expr/optimizer" - "github.com/expr-lang/expr/parser" "github.com/expr-lang/expr/patcher" "github.com/expr-lang/expr/vm" ) @@ -183,6 +183,17 @@ func WithContext(name string) Option { }) } +// Timezone sets default timezone for date() and now() builtin functions. +func Timezone(name string) Option { + tz, err := time.LoadLocation(name) + if err != nil { + panic(err) + } + return Patch(patcher.WithTimezone{ + Location: tz, + }) +} + // Compile parses and compiles given input expression to bytecode program. func Compile(input string, ops ...Option) (*vm.Program, error) { config := conf.CreateNew() @@ -194,33 +205,7 @@ func Compile(input string, ops ...Option) (*vm.Program, error) { } config.Check() - tree, err := parser.ParseWithConfig(input, config) - if err != nil { - return nil, err - } - - if len(config.Visitors) > 0 { - for i := 0; i < 1000; i++ { - more := false - for _, v := range config.Visitors { - // We need to perform types check, because some visitors may rely on - // types information available in the tree. 
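The timezone support introduced above (timezone-aware now()/date() builtins, the new timezone() builtin, and the expr.Timezone option backed by the WithTimezone patcher) can be exercised roughly as follows. A sketch assuming the expr version vendored in this bump; the expression and location names are illustrative:

```go
package main

import (
	"fmt"
	"time"

	"github.com/expr-lang/expr"
)

func main() {
	env := map[string]any{}

	// expr.Timezone patches date()/now() so they evaluate in the given location.
	program, err := expr.Compile(
		`date("2024-08-12 10:00", "2006-01-02 15:04")`,
		expr.Env(env),
		expr.Timezone("Europe/Madrid"),
	)
	if err != nil {
		panic(err)
	}
	out, err := expr.Run(program, env)
	if err != nil {
		panic(err)
	}
	fmt.Println(out.(time.Time).Location()) // Europe/Madrid

	// The new timezone() builtin can also be used inline, e.g. now(timezone("Asia/Tokyo")).
}
```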
- _, _ = checker.Check(tree, config) - - ast.Walk(&tree.Node, v) - - if v, ok := v.(interface { - ShouldRepeat() bool - }); ok { - more = more || v.ShouldRepeat() - } - } - if !more { - break - } - } - } - _, err = checker.Check(tree, config) + tree, err := checker.ParseCheck(input, config) if err != nil { return nil, err } diff --git a/vendor/github.com/expr-lang/expr/file/error.go b/vendor/github.com/expr-lang/expr/file/error.go index edf202b0456..8ff85dfa5f7 100644 --- a/vendor/github.com/expr-lang/expr/file/error.go +++ b/vendor/github.com/expr-lang/expr/file/error.go @@ -8,22 +8,36 @@ import ( type Error struct { Location - Message string - Snippet string - Prev error + Line int `json:"line"` + Column int `json:"column"` + Message string `json:"message"` + Snippet string `json:"snippet"` + Prev error `json:"prev"` } func (e *Error) Error() string { return e.format() } -func (e *Error) Bind(source *Source) *Error { - if snippet, found := source.Snippet(e.Location.Line); found { +func (e *Error) Bind(source Source) *Error { + e.Line = 1 + for i, r := range source { + if i == e.From { + break + } + if r == '\n' { + e.Line++ + e.Column = 0 + } else { + e.Column++ + } + } + if snippet, found := source.Snippet(e.Line); found { snippet := strings.Replace(snippet, "\t", " ", -1) srcLine := "\n | " + snippet var bytes = []byte(snippet) var indLine = "\n | " - for i := 0; i < e.Location.Column && len(bytes) > 0; i++ { + for i := 0; i < e.Column && len(bytes) > 0; i++ { _, sz := utf8.DecodeRune(bytes) bytes = bytes[sz:] if sz > 1 { @@ -54,7 +68,7 @@ func (e *Error) Wrap(err error) { } func (e *Error) format() string { - if e.Location.Empty() { + if e.Snippet == "" { return e.Message } return fmt.Sprintf( diff --git a/vendor/github.com/expr-lang/expr/file/location.go b/vendor/github.com/expr-lang/expr/file/location.go index a92e27f0b1c..6c6bc2427ec 100644 --- a/vendor/github.com/expr-lang/expr/file/location.go +++ b/vendor/github.com/expr-lang/expr/file/location.go @@ -1,10 +1,6 @@ package file type Location struct { - Line int // The 1-based line of the location. - Column int // The 0-based column number of the location. 
-} - -func (l Location) Empty() bool { - return l.Column == 0 && l.Line == 0 + From int `json:"from"` + To int `json:"to"` } diff --git a/vendor/github.com/expr-lang/expr/file/source.go b/vendor/github.com/expr-lang/expr/file/source.go index d86a546b100..8e2b2d1540c 100644 --- a/vendor/github.com/expr-lang/expr/file/source.go +++ b/vendor/github.com/expr-lang/expr/file/source.go @@ -1,78 +1,47 @@ package file import ( - "encoding/json" "strings" "unicode/utf8" ) -type Source struct { - contents []rune - lineOffsets []int32 -} - -func NewSource(contents string) *Source { - s := &Source{ - contents: []rune(contents), - } - s.updateOffsets() - return s -} - -func (s *Source) MarshalJSON() ([]byte, error) { - return json.Marshal(s.contents) -} - -func (s *Source) UnmarshalJSON(b []byte) error { - contents := make([]rune, 0) - err := json.Unmarshal(b, &contents) - if err != nil { - return err - } +type Source []rune - s.contents = contents - s.updateOffsets() - return nil +func NewSource(contents string) Source { + return []rune(contents) } -func (s *Source) Content() string { - return string(s.contents) +func (s Source) String() string { + return string(s) } -func (s *Source) Snippet(line int) (string, bool) { +func (s Source) Snippet(line int) (string, bool) { if s == nil { return "", false } - charStart, found := s.findLineOffset(line) - if !found || len(s.contents) == 0 { + lines := strings.Split(string(s), "\n") + lineOffsets := make([]int, len(lines)) + var offset int + for i, line := range lines { + offset = offset + utf8.RuneCountInString(line) + 1 + lineOffsets[i] = offset + } + charStart, found := getLineOffset(lineOffsets, line) + if !found || len(s) == 0 { return "", false } - charEnd, found := s.findLineOffset(line + 1) + charEnd, found := getLineOffset(lineOffsets, line+1) if found { - return string(s.contents[charStart : charEnd-1]), true - } - return string(s.contents[charStart:]), true -} - -// updateOffsets compute line offsets up front as they are referred to frequently. -func (s *Source) updateOffsets() { - lines := strings.Split(string(s.contents), "\n") - offsets := make([]int32, len(lines)) - var offset int32 - for i, line := range lines { - offset = offset + int32(utf8.RuneCountInString(line)) + 1 - offsets[int32(i)] = offset + return string(s[charStart : charEnd-1]), true } - s.lineOffsets = offsets + return string(s[charStart:]), true } -// findLineOffset returns the offset where the (1-indexed) line begins, -// or false if line doesn't exist. 
-func (s *Source) findLineOffset(line int) (int32, bool) { +func getLineOffset(lineOffsets []int, line int) (int, bool) { if line == 1 { return 0, true - } else if line > 1 && line <= len(s.lineOffsets) { - offset := s.lineOffsets[line-2] + } else if line > 1 && line <= len(lineOffsets) { + offset := lineOffsets[line-2] return offset, true } return -1, false diff --git a/vendor/github.com/expr-lang/expr/parser/lexer/lexer.go b/vendor/github.com/expr-lang/expr/parser/lexer/lexer.go index c32658637f2..e6b06c09d09 100644 --- a/vendor/github.com/expr-lang/expr/parser/lexer/lexer.go +++ b/vendor/github.com/expr-lang/expr/parser/lexer/lexer.go @@ -3,20 +3,18 @@ package lexer import ( "fmt" "strings" - "unicode/utf8" "github.com/expr-lang/expr/file" ) -func Lex(source *file.Source) ([]Token, error) { +func Lex(source file.Source) ([]Token, error) { l := &lexer{ - input: source.Content(), + source: source, tokens: make([]Token, 0), + start: 0, + end: 0, } - - l.loc = file.Location{Line: 1, Column: 0} - l.prev = l.loc - l.startLoc = l.loc + l.commit() for state := root; state != nil; { state = state(l) @@ -30,34 +28,25 @@ func Lex(source *file.Source) ([]Token, error) { } type lexer struct { - input string + source file.Source tokens []Token - start, end int // current position in input - width int // last rune width - startLoc file.Location // start location - prev, loc file.Location // prev location of end location, end location + start, end int err *file.Error } const eof rune = -1 +func (l *lexer) commit() { + l.start = l.end +} + func (l *lexer) next() rune { - if l.end >= len(l.input) { - l.width = 0 + if l.end >= len(l.source) { + l.end++ return eof } - r, w := utf8.DecodeRuneInString(l.input[l.end:]) - l.width = w - l.end += w - - l.prev = l.loc - if r == '\n' { - l.loc.Line++ - l.loc.Column = 0 - } else { - l.loc.Column++ - } - + r := l.source[l.end] + l.end++ return r } @@ -68,8 +57,7 @@ func (l *lexer) peek() rune { } func (l *lexer) backup() { - l.end -= l.width - l.loc = l.prev + l.end-- } func (l *lexer) emit(t Kind) { @@ -78,35 +66,39 @@ func (l *lexer) emit(t Kind) { func (l *lexer) emitValue(t Kind, value string) { l.tokens = append(l.tokens, Token{ - Location: l.startLoc, + Location: file.Location{From: l.start, To: l.end}, Kind: t, Value: value, }) - l.start = l.end - l.startLoc = l.loc + l.commit() } func (l *lexer) emitEOF() { + from := l.end - 2 + if from < 0 { + from = 0 + } + to := l.end - 1 + if to < 0 { + to = 0 + } l.tokens = append(l.tokens, Token{ - Location: l.prev, // Point to previous position for better error messages. + Location: file.Location{From: from, To: to}, Kind: EOF, }) - l.start = l.end - l.startLoc = l.loc + l.commit() } func (l *lexer) skip() { - l.start = l.end - l.startLoc = l.loc + l.commit() } func (l *lexer) word() string { - return l.input[l.start:l.end] -} - -func (l *lexer) ignore() { - l.start = l.end - l.startLoc = l.loc + // TODO: boundary check is NOT needed here, but for some reason CI fuzz tests are failing. 
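With the location rework above (file.Location now stores From/To rune offsets and file.Error.Bind derives Line and Column from the source), compile errors can still be inspected positionally. A sketch assuming the vendored expr version; the malformed expression is only an example:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/expr-lang/expr"
	"github.com/expr-lang/expr/file"
)

func main() {
	_, err := expr.Compile(`foo +`) // deliberately malformed
	var ferr *file.Error
	if errors.As(err, &ferr) {
		// Line and Column are now plain exported fields, computed from the
		// From/To rune offsets when the error is bound to its source.
		fmt.Println(ferr.Line, ferr.Column, ferr.Message)
	} else if err != nil {
		fmt.Println(err)
	}
}
```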
+ if l.start > len(l.source) || l.end > len(l.source) { + return "__invalid__" + } + return string(l.source[l.start:l.end]) } func (l *lexer) accept(valid string) bool { @@ -132,18 +124,18 @@ func (l *lexer) skipSpaces() { } func (l *lexer) acceptWord(word string) bool { - pos, loc, prev := l.end, l.loc, l.prev + pos := l.end l.skipSpaces() for _, ch := range word { if l.next() != ch { - l.end, l.loc, l.prev = pos, loc, prev + l.end = pos return false } } if r := l.peek(); r != ' ' && r != eof { - l.end, l.loc, l.prev = pos, loc, prev + l.end = pos return false } @@ -153,8 +145,11 @@ func (l *lexer) acceptWord(word string) bool { func (l *lexer) error(format string, args ...any) stateFn { if l.err == nil { // show first error l.err = &file.Error{ - Location: l.loc, - Message: fmt.Sprintf(format, args...), + Location: file.Location{ + From: l.end - 1, + To: l.end, + }, + Message: fmt.Sprintf(format, args...), } } return nil @@ -230,6 +225,6 @@ func (l *lexer) scanRawString(quote rune) (n int) { ch = l.next() n++ } - l.emitValue(String, l.input[l.start+1:l.end-1]) + l.emitValue(String, string(l.source[l.start+1:l.end-1])) return } diff --git a/vendor/github.com/expr-lang/expr/parser/lexer/state.go b/vendor/github.com/expr-lang/expr/parser/lexer/state.go index 72f02bf4ef7..d351e2f5c8b 100644 --- a/vendor/github.com/expr-lang/expr/parser/lexer/state.go +++ b/vendor/github.com/expr-lang/expr/parser/lexer/state.go @@ -14,7 +14,7 @@ func root(l *lexer) stateFn { l.emitEOF() return nil case utils.IsSpace(r): - l.ignore() + l.skip() return root case r == '\'' || r == '"': l.scanString(r) @@ -83,14 +83,14 @@ func (l *lexer) scanNumber() bool { } } l.acceptRun(digits) - loc, prev, end := l.loc, l.prev, l.end + end := l.end if l.accept(".") { // Lookup for .. operator: if after dot there is another dot (1..2), it maybe a range operator. if l.peek() == '.' { // We can't backup() here, as it would require two backups, // and backup() func supports only one for now. So, save and // restore it here. - l.loc, l.prev, l.end = loc, prev, end + l.end = end return true } l.acceptRun(digits) @@ -147,7 +147,7 @@ func not(l *lexer) stateFn { l.skipSpaces() - pos, loc, prev := l.end, l.loc, l.prev + end := l.end // Get the next word. 
for { @@ -164,7 +164,7 @@ func not(l *lexer) stateFn { case "in", "matches", "contains", "startsWith", "endsWith": l.emit(Operator) default: - l.end, l.loc, l.prev = pos, loc, prev + l.end = end } return root } @@ -193,7 +193,7 @@ func singleLineComment(l *lexer) stateFn { break } } - l.ignore() + l.skip() return root } @@ -207,7 +207,7 @@ func multiLineComment(l *lexer) stateFn { break } } - l.ignore() + l.skip() return root } diff --git a/vendor/github.com/expr-lang/expr/parser/parser.go b/vendor/github.com/expr-lang/expr/parser/parser.go index 6d96561ad2c..77b2a700a31 100644 --- a/vendor/github.com/expr-lang/expr/parser/parser.go +++ b/vendor/github.com/expr-lang/expr/parser/parser.go @@ -55,7 +55,7 @@ type parser struct { type Tree struct { Node Node - Source *file.Source + Source file.Source } func Parse(input string) (*Tree, error) { @@ -84,14 +84,16 @@ func ParseWithConfig(input string, config *conf.Config) (*Tree, error) { p.error("unexpected token %v", p.current) } + tree := &Tree{ + Node: node, + Source: source, + } + if p.err != nil { - return nil, p.err.Bind(source) + return tree, p.err.Bind(source) } - return &Tree{ - Node: node, - Source: source, - }, nil + return tree, nil } func (p *parser) error(format string, args ...any) { diff --git a/vendor/github.com/expr-lang/expr/patcher/with_timezone.go b/vendor/github.com/expr-lang/expr/patcher/with_timezone.go new file mode 100644 index 00000000000..83eb28e95ac --- /dev/null +++ b/vendor/github.com/expr-lang/expr/patcher/with_timezone.go @@ -0,0 +1,25 @@ +package patcher + +import ( + "time" + + "github.com/expr-lang/expr/ast" +) + +// WithTimezone passes Location to date() and now() functions. +type WithTimezone struct { + Location *time.Location +} + +func (t WithTimezone) Visit(node *ast.Node) { + if btin, ok := (*node).(*ast.BuiltinNode); ok { + switch btin.Name { + case "date", "now": + loc := &ast.ConstantNode{Value: t.Location} + ast.Patch(node, &ast.BuiltinNode{ + Name: btin.Name, + Arguments: append([]ast.Node{loc}, btin.Arguments...), + }) + } + } +} diff --git a/vendor/github.com/expr-lang/expr/vm/program.go b/vendor/github.com/expr-lang/expr/vm/program.go index 98954674412..15ce26f5b28 100644 --- a/vendor/github.com/expr-lang/expr/vm/program.go +++ b/vendor/github.com/expr-lang/expr/vm/program.go @@ -21,7 +21,7 @@ type Program struct { Arguments []int Constants []any - source *file.Source + source file.Source node ast.Node locations []file.Location variables int @@ -32,7 +32,7 @@ type Program struct { // NewProgram returns a new Program. It's used by the compiler. func NewProgram( - source *file.Source, + source file.Source, node ast.Node, locations []file.Location, variables int, @@ -58,7 +58,7 @@ func NewProgram( } // Source returns origin file.Source. 
-func (program *Program) Source() *file.Source { +func (program *Program) Source() file.Source { return program.source } diff --git a/vendor/github.com/expr-lang/expr/vm/runtime/runtime.go b/vendor/github.com/expr-lang/expr/vm/runtime/runtime.go index 7da1320de38..cd48a280dc3 100644 --- a/vendor/github.com/expr-lang/expr/vm/runtime/runtime.go +++ b/vendor/github.com/expr-lang/expr/vm/runtime/runtime.go @@ -35,8 +35,12 @@ func Fetch(from, i any) any { switch v.Kind() { case reflect.Array, reflect.Slice, reflect.String: index := ToInt(i) + l := v.Len() if index < 0 { - index = v.Len() + index + index = l + index + } + if index < 0 || index >= l { + panic(fmt.Sprintf("index out of range: %v (array length is %v)", index, l)) } value := v.Index(index) if value.IsValid() { diff --git a/vendor/github.com/expr-lang/expr/vm/vm.go b/vendor/github.com/expr-lang/expr/vm/vm.go index 7e933ce7408..fa1223b420f 100644 --- a/vendor/github.com/expr-lang/expr/vm/vm.go +++ b/vendor/github.com/expr-lang/expr/vm/vm.go @@ -274,31 +274,50 @@ func (vm *VM) Run(program *Program, env any) (_ any, err error) { case OpMatches: b := vm.pop() a := vm.pop() + if runtime.IsNil(a) || runtime.IsNil(b) { + vm.push(false) + break + } match, err := regexp.MatchString(b.(string), a.(string)) if err != nil { panic(err) } - vm.push(match) case OpMatchesConst: a := vm.pop() + if runtime.IsNil(a) { + vm.push(false) + break + } r := program.Constants[arg].(*regexp.Regexp) vm.push(r.MatchString(a.(string))) case OpContains: b := vm.pop() a := vm.pop() + if runtime.IsNil(a) || runtime.IsNil(b) { + vm.push(false) + break + } vm.push(strings.Contains(a.(string), b.(string))) case OpStartsWith: b := vm.pop() a := vm.pop() + if runtime.IsNil(a) || runtime.IsNil(b) { + vm.push(false) + break + } vm.push(strings.HasPrefix(a.(string), b.(string))) case OpEndsWith: b := vm.pop() a := vm.pop() + if runtime.IsNil(a) || runtime.IsNil(b) { + vm.push(false) + break + } vm.push(strings.HasSuffix(a.(string), b.(string))) case OpSlice: diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md index 8969526a6e5..7c7f0c69cd9 100644 --- a/vendor/github.com/go-logr/logr/README.md +++ b/vendor/github.com/go-logr/logr/README.md @@ -1,6 +1,7 @@ # A minimal logging API for Go [![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-logr/logr)](https://goreportcard.com/report/github.com/go-logr/logr) [![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr) logr offers an(other) opinion on how Go programs and libraries can do logging diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go index fb2f866f4b7..30568e768dc 100644 --- a/vendor/github.com/go-logr/logr/funcr/funcr.go +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -236,15 +236,14 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter { // implementation. It should be constructed with NewFormatter. Some of // its methods directly implement logr.LogSink. 
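The expr VM hardening above (nil-safe matches/contains/startsWith/endsWith and bounds-checked indexing in runtime.Fetch) changes observable behaviour for expressions over missing data. A sketch assuming the vendored expr version:

```go
package main

import (
	"fmt"

	"github.com/expr-lang/expr"
)

func main() {
	program, err := expr.Compile(`name startsWith "abc"`)
	if err != nil {
		panic(err)
	}
	// "name" resolves to nil at runtime; with this version the string
	// operators push false instead of failing on the string type assertion.
	out, err := expr.Run(program, map[string]any{"name": nil})
	fmt.Println(out, err) // false <nil>
}
```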
type Formatter struct { - outputFormat outputFormat - prefix string - values []any - valuesStr string - parentValuesStr string - depth int - opts *Options - group string // for slog groups - groupDepth int + outputFormat outputFormat + prefix string + values []any + valuesStr string + depth int + opts *Options + groupName string // for slog groups + groups []groupDef } // outputFormat indicates which outputFormat to use. @@ -257,6 +256,13 @@ const ( outputJSON ) +// groupDef represents a saved group. The values may be empty, but we don't +// know if we need to render the group until the final record is rendered. +type groupDef struct { + name string + values string +} + // PseudoStruct is a list of key-value pairs that gets logged as a struct. type PseudoStruct []any @@ -264,76 +270,102 @@ type PseudoStruct []any func (f Formatter) render(builtins, args []any) string { // Empirically bytes.Buffer is faster than strings.Builder for this. buf := bytes.NewBuffer(make([]byte, 0, 1024)) + if f.outputFormat == outputJSON { - buf.WriteByte('{') // for the whole line + buf.WriteByte('{') // for the whole record } + // Render builtins vals := builtins if hook := f.opts.RenderBuiltinsHook; hook != nil { vals = hook(f.sanitize(vals)) } - f.flatten(buf, vals, false, false) // keys are ours, no need to escape + f.flatten(buf, vals, false) // keys are ours, no need to escape continuing := len(builtins) > 0 - if f.parentValuesStr != "" { - if continuing { - buf.WriteByte(f.comma()) + // Turn the inner-most group into a string + argsStr := func() string { + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + + vals = args + if hook := f.opts.RenderArgsHook; hook != nil { + vals = hook(f.sanitize(vals)) } - buf.WriteString(f.parentValuesStr) - continuing = true - } + f.flatten(buf, vals, true) // escape user-provided keys - groupDepth := f.groupDepth - if f.group != "" { - if f.valuesStr != "" || len(args) != 0 { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys - buf.WriteByte(f.colon()) - buf.WriteByte('{') // for the group - continuing = false - } else { - // The group was empty - groupDepth-- + return buf.String() + }() + + // Render the stack of groups from the inside out. + bodyStr := f.renderGroup(f.groupName, f.valuesStr, argsStr) + for i := len(f.groups) - 1; i >= 0; i-- { + grp := &f.groups[i] + if grp.values == "" && bodyStr == "" { + // no contents, so we must elide the whole group + continue } + bodyStr = f.renderGroup(grp.name, grp.values, bodyStr) } - if f.valuesStr != "" { + if bodyStr != "" { if continuing { buf.WriteByte(f.comma()) } - buf.WriteString(f.valuesStr) - continuing = true + buf.WriteString(bodyStr) } - vals = args - if hook := f.opts.RenderArgsHook; hook != nil { - vals = hook(f.sanitize(vals)) + if f.outputFormat == outputJSON { + buf.WriteByte('}') // for the whole record } - f.flatten(buf, vals, continuing, true) // escape user-provided keys - for i := 0; i < groupDepth; i++ { - buf.WriteByte('}') // for the groups + return buf.String() +} + +// renderGroup returns a string representation of the named group with rendered +// values and args. If the name is empty, this will return the values and args, +// joined. If the name is not empty, this will return a single key-value pair, +// where the value is a grouping of the values and args. If the values and +// args are both empty, this will return an empty string, even if the name was +// specified. 
+func (f Formatter) renderGroup(name string, values string, args string) string { + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + + needClosingBrace := false + if name != "" && (values != "" || args != "") { + buf.WriteString(f.quoted(name, true)) // escape user-provided keys + buf.WriteByte(f.colon()) + buf.WriteByte('{') + needClosingBrace = true } - if f.outputFormat == outputJSON { - buf.WriteByte('}') // for the whole line + continuing := false + if values != "" { + buf.WriteString(values) + continuing = true + } + + if args != "" { + if continuing { + buf.WriteByte(f.comma()) + } + buf.WriteString(args) + } + + if needClosingBrace { + buf.WriteByte('}') } return buf.String() } -// flatten renders a list of key-value pairs into a buffer. If continuing is -// true, it assumes that the buffer has previous values and will emit a -// separator (which depends on the output format) before the first pair it -// writes. If escapeKeys is true, the keys are assumed to have -// non-JSON-compatible characters in them and must be evaluated for escapes. +// flatten renders a list of key-value pairs into a buffer. If escapeKeys is +// true, the keys are assumed to have non-JSON-compatible characters in them +// and must be evaluated for escapes. // // This function returns a potentially modified version of kvList, which // ensures that there is a value for every key (adding a value if needed) and // that each key is a string (substituting a key if needed). -func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) []any { +func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, escapeKeys bool) []any { // This logic overlaps with sanitize() but saves one type-cast per key, // which can be measurable. if len(kvList)%2 != 0 { @@ -354,7 +386,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc } v := kvList[i+1] - if i > 0 || continuing { + if i > 0 { if f.outputFormat == outputJSON { buf.WriteByte(f.comma()) } else { @@ -766,46 +798,17 @@ func (f Formatter) sanitize(kvList []any) []any { // startGroup opens a new group scope (basically a sub-struct), which locks all // the current saved values and starts them anew. This is needed to satisfy // slog. -func (f *Formatter) startGroup(group string) { +func (f *Formatter) startGroup(name string) { // Unnamed groups are just inlined. - if group == "" { + if name == "" { return } - // Any saved values can no longer be changed. - buf := bytes.NewBuffer(make([]byte, 0, 1024)) - continuing := false - - if f.parentValuesStr != "" { - buf.WriteString(f.parentValuesStr) - continuing = true - } - - if f.group != "" && f.valuesStr != "" { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys - buf.WriteByte(f.colon()) - buf.WriteByte('{') // for the group - continuing = false - } - - if f.valuesStr != "" { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(f.valuesStr) - } - - // NOTE: We don't close the scope here - that's done later, when a log line - // is actually rendered (because we have N scopes to close). - - f.parentValuesStr = buf.String() + n := len(f.groups) + f.groups = append(f.groups[:n:n], groupDef{f.groupName, f.valuesStr}) // Start collecting new values. - f.group = group - f.groupDepth++ + f.groupName = name f.valuesStr = "" f.values = nil } @@ -900,7 +903,7 @@ func (f *Formatter) AddValues(kvList []any) { // Pre-render values, so we don't have to do it on each Info/Error call. 
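The funcr changes above rework how slog-style groups are rendered (saved as a stack of groupDef entries and elided when empty). A rough usage sketch, assuming a logr release that ships the slog bridge (logr.ToSlogHandler) alongside this funcr; the sink function is illustrative:

```go
package main

import (
	"fmt"
	"log/slog"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

func main() {
	// funcr renders each record to a string; print it for demonstration.
	l := funcr.New(func(prefix, args string) {
		fmt.Println(prefix, args)
	}, funcr.Options{})

	// Route slog through the funcr sink so WithGroup exercises the group rendering.
	sl := slog.New(logr.ToSlogHandler(l))
	sl.WithGroup("http").Info("request", "method", "GET", "status", 200)
	// A group with no values contributes nothing to the rendered record.
	sl.WithGroup("empty").Info("ping")
}
```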
buf := bytes.NewBuffer(make([]byte, 0, 1024)) - f.flatten(buf, vals, false, true) // escape user-provided keys + f.flatten(buf, vals, true) // escape user-provided keys f.valuesStr = buf.String() } diff --git a/vendor/github.com/go-playground/validator/v10/README.md b/vendor/github.com/go-playground/validator/v10/README.md index a6e1d0b51d1..9ab0705aeb7 100644 --- a/vendor/github.com/go-playground/validator/v10/README.md +++ b/vendor/github.com/go-playground/validator/v10/README.md @@ -1,7 +1,7 @@ Package validator ================= [![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -![Project status](https://img.shields.io/badge/version-10.19.0-green.svg) +![Project status](https://img.shields.io/badge/version-10.22.0-green.svg) [![Build Status](https://travis-ci.org/go-playground/validator.svg?branch=master)](https://travis-ci.org/go-playground/validator) [![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=master&service=github)](https://coveralls.io/github/go-playground/validator?branch=master) [![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/validator)](https://goreportcard.com/report/github.com/go-playground/validator) @@ -163,6 +163,7 @@ validate := validator.New(validator.WithRequiredStructEnabled()) | btc_addr_bech32 | Bitcoin Bech32 Address (segwit) | | credit_card | Credit Card Number | | mongodb | MongoDB ObjectID | +| mongodb_connection_string | MongoDB Connection String | | cron | Cron | | spicedb | SpiceDb ObjectID/Permission/Type | | datetime | Datetime | diff --git a/vendor/github.com/go-playground/validator/v10/baked_in.go b/vendor/github.com/go-playground/validator/v10/baked_in.go index 95f56e00803..b6fbaafad97 100644 --- a/vendor/github.com/go-playground/validator/v10/baked_in.go +++ b/vendor/github.com/go-playground/validator/v10/baked_in.go @@ -64,8 +64,9 @@ var ( // defines a common or complex set of validation(s) to simplify // adding validation to structs. 
bakedInAliases = map[string]string{ - "iscolor": "hexcolor|rgb|rgba|hsl|hsla", - "country_code": "iso3166_1_alpha2|iso3166_1_alpha3|iso3166_1_alpha_numeric", + "iscolor": "hexcolor|rgb|rgba|hsl|hsla", + "country_code": "iso3166_1_alpha2|iso3166_1_alpha3|iso3166_1_alpha_numeric", + "eu_country_code": "iso3166_1_alpha2_eu|iso3166_1_alpha3_eu|iso3166_1_alpha_numeric_eu", } // bakedInValidators is the default map of ValidationFunc @@ -133,6 +134,7 @@ var ( "urn_rfc2141": isUrnRFC2141, // RFC 2141 "file": isFile, "filepath": isFilePath, + "base32": isBase32, "base64": isBase64, "base64url": isBase64URL, "base64rawurl": isBase64RawURL, @@ -216,8 +218,11 @@ var ( "datetime": isDatetime, "timezone": isTimeZone, "iso3166_1_alpha2": isIso3166Alpha2, + "iso3166_1_alpha2_eu": isIso3166Alpha2EU, "iso3166_1_alpha3": isIso3166Alpha3, + "iso3166_1_alpha3_eu": isIso3166Alpha3EU, "iso3166_1_alpha_numeric": isIso3166AlphaNumeric, + "iso3166_1_alpha_numeric_eu": isIso3166AlphaNumericEU, "iso3166_2": isIso31662, "iso4217": isIso4217, "iso4217_numeric": isIso4217Numeric, @@ -230,7 +235,8 @@ var ( "credit_card": isCreditCard, "cve": isCveFormat, "luhn_checksum": hasLuhnChecksum, - "mongodb": isMongoDB, + "mongodb": isMongoDBObjectId, + "mongodb_connection_string": isMongoDBConnectionString, "cron": isCron, "spicedb": isSpiceDB, } @@ -247,7 +253,7 @@ func parseOneOfParam2(s string) []string { oneofValsCacheRWLock.RUnlock() if !ok { oneofValsCacheRWLock.Lock() - vals = splitParamsRegex.FindAllString(s, -1) + vals = splitParamsRegex().FindAllString(s, -1) for i := 0; i < len(vals); i++ { vals[i] = strings.Replace(vals[i], "'", "", -1) } @@ -258,15 +264,15 @@ func parseOneOfParam2(s string) []string { } func isURLEncoded(fl FieldLevel) bool { - return uRLEncodedRegex.MatchString(fl.Field().String()) + return uRLEncodedRegex().MatchString(fl.Field().String()) } func isHTMLEncoded(fl FieldLevel) bool { - return hTMLEncodedRegex.MatchString(fl.Field().String()) + return hTMLEncodedRegex().MatchString(fl.Field().String()) } func isHTML(fl FieldLevel) bool { - return hTMLRegex.MatchString(fl.Field().String()) + return hTMLRegex().MatchString(fl.Field().String()) } func isOneOf(fl FieldLevel) bool { @@ -423,7 +429,7 @@ func isSSN(fl FieldLevel) bool { return false } - return sSNRegex.MatchString(field.String()) + return sSNRegex().MatchString(field.String()) } // isLongitude is the validation function for validating if the field's value is a valid longitude coordinate. @@ -446,7 +452,7 @@ func isLongitude(fl FieldLevel) bool { panic(fmt.Sprintf("Bad field type %T", field.Interface())) } - return longitudeRegex.MatchString(v) + return longitudeRegex().MatchString(v) } // isLatitude is the validation function for validating if the field's value is a valid latitude coordinate. @@ -469,7 +475,7 @@ func isLatitude(fl FieldLevel) bool { panic(fmt.Sprintf("Bad field type %T", field.Interface())) } - return latitudeRegex.MatchString(v) + return latitudeRegex().MatchString(v) } // isDataURI is the validation function for validating if the field's value is a valid data URI. @@ -480,11 +486,11 @@ func isDataURI(fl FieldLevel) bool { return false } - if !dataURIRegex.MatchString(uri[0]) { + if !dataURIRegex().MatchString(uri[0]) { return false } - return base64Regex.MatchString(uri[1]) + return base64Regex().MatchString(uri[1]) } // hasMultiByteCharacter is the validation function for validating if the field's value has a multi byte character. 
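The registrations above are what this vendor bump exposes to downstream structs: the base32 and mongodb_connection_string validators and the eu_country_code alias. As a quick usage sketch (not part of the patch; the struct, field names, and sample values are illustrative and merely expected to pass), the new tags are used like any other baked-in validator:

package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

// Payload is a hypothetical struct exercising the tags added in
// validator v10.22.0: base32, mongodb_connection_string, and the
// eu_country_code alias.
type Payload struct {
	Token   string `validate:"base32"`
	MongoDB string `validate:"mongodb_connection_string"`
	Country string `validate:"eu_country_code"`
}

func main() {
	validate := validator.New(validator.WithRequiredStructEnabled())
	err := validate.Struct(Payload{
		Token:   "MZXW6YTBOI======",          // "foobar" in standard base32
		MongoDB: "mongodb://localhost:27017", // plain mongodb:// URI
		Country: "DE",                        // ISO 3166-1 alpha-2 EU member
	})
	fmt.Println(err) // expected to be <nil> if all three values validate
}
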
@@ -495,17 +501,17 @@ func hasMultiByteCharacter(fl FieldLevel) bool { return true } - return multibyteRegex.MatchString(field.String()) + return multibyteRegex().MatchString(field.String()) } // isPrintableASCII is the validation function for validating if the field's value is a valid printable ASCII character. func isPrintableASCII(fl FieldLevel) bool { - return printableASCIIRegex.MatchString(fl.Field().String()) + return printableASCIIRegex().MatchString(fl.Field().String()) } // isASCII is the validation function for validating if the field's value is a valid ASCII character. func isASCII(fl FieldLevel) bool { - return aSCIIRegex.MatchString(fl.Field().String()) + return aSCIIRegex().MatchString(fl.Field().String()) } // isUUID5 is the validation function for validating if the field's value is a valid v5 UUID. @@ -555,52 +561,52 @@ func isULID(fl FieldLevel) bool { // isMD4 is the validation function for validating if the field's value is a valid MD4. func isMD4(fl FieldLevel) bool { - return md4Regex.MatchString(fl.Field().String()) + return md4Regex().MatchString(fl.Field().String()) } // isMD5 is the validation function for validating if the field's value is a valid MD5. func isMD5(fl FieldLevel) bool { - return md5Regex.MatchString(fl.Field().String()) + return md5Regex().MatchString(fl.Field().String()) } // isSHA256 is the validation function for validating if the field's value is a valid SHA256. func isSHA256(fl FieldLevel) bool { - return sha256Regex.MatchString(fl.Field().String()) + return sha256Regex().MatchString(fl.Field().String()) } // isSHA384 is the validation function for validating if the field's value is a valid SHA384. func isSHA384(fl FieldLevel) bool { - return sha384Regex.MatchString(fl.Field().String()) + return sha384Regex().MatchString(fl.Field().String()) } // isSHA512 is the validation function for validating if the field's value is a valid SHA512. func isSHA512(fl FieldLevel) bool { - return sha512Regex.MatchString(fl.Field().String()) + return sha512Regex().MatchString(fl.Field().String()) } // isRIPEMD128 is the validation function for validating if the field's value is a valid PIPEMD128. func isRIPEMD128(fl FieldLevel) bool { - return ripemd128Regex.MatchString(fl.Field().String()) + return ripemd128Regex().MatchString(fl.Field().String()) } // isRIPEMD160 is the validation function for validating if the field's value is a valid PIPEMD160. func isRIPEMD160(fl FieldLevel) bool { - return ripemd160Regex.MatchString(fl.Field().String()) + return ripemd160Regex().MatchString(fl.Field().String()) } // isTIGER128 is the validation function for validating if the field's value is a valid TIGER128. func isTIGER128(fl FieldLevel) bool { - return tiger128Regex.MatchString(fl.Field().String()) + return tiger128Regex().MatchString(fl.Field().String()) } // isTIGER160 is the validation function for validating if the field's value is a valid TIGER160. func isTIGER160(fl FieldLevel) bool { - return tiger160Regex.MatchString(fl.Field().String()) + return tiger160Regex().MatchString(fl.Field().String()) } // isTIGER192 is the validation function for validating if the field's value is a valid isTIGER192. func isTIGER192(fl FieldLevel) bool { - return tiger192Regex.MatchString(fl.Field().String()) + return tiger192Regex().MatchString(fl.Field().String()) } // isISBN is the validation function for validating if the field's value is a valid v10 or v13 ISBN. 
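The mechanical change running through these hunks, package-level regex variables such as md5Regex becoming accessor calls like md5Regex(), indicates that the patterns are now compiled lazily on first use rather than eagerly at package init. The helper itself is not shown in this diff; the sketch below is an assumption of how such an accessor can be built with sync.OnceValue (Go 1.21+), not the vendored implementation, and the pattern used here is purely illustrative.

package main

import (
	"fmt"
	"regexp"
	"sync"
)

// lazyRegexCompile returns an accessor that compiles the pattern once, on
// first call, and returns the cached *regexp.Regexp afterwards. The name and
// exact mechanism of the real helper in the vendored package may differ.
func lazyRegexCompile(pattern string) func() *regexp.Regexp {
	return sync.OnceValue(func() *regexp.Regexp {
		return regexp.MustCompile(pattern)
	})
}

// Hypothetical pattern, for illustration only.
var hexColorRegex = lazyRegexCompile(`^#(?:[0-9a-fA-F]{3}|[0-9a-fA-F]{6})$`)

func main() {
	// Call sites take the same shape as the diff: regex name, then (), then MatchString.
	fmt.Println(hexColorRegex().MatchString("#a1b2c3")) // true
	fmt.Println(hexColorRegex().MatchString("red"))     // false
}
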
@@ -612,7 +618,7 @@ func isISBN(fl FieldLevel) bool { func isISBN13(fl FieldLevel) bool { s := strings.Replace(strings.Replace(fl.Field().String(), "-", "", 4), " ", "", 4) - if !iSBN13Regex.MatchString(s) { + if !iSBN13Regex().MatchString(s) { return false } @@ -632,7 +638,7 @@ func isISBN13(fl FieldLevel) bool { func isISBN10(fl FieldLevel) bool { s := strings.Replace(strings.Replace(fl.Field().String(), "-", "", 3), " ", "", 3) - if !iSBN10Regex.MatchString(s) { + if !iSBN10Regex().MatchString(s) { return false } @@ -656,7 +662,7 @@ func isISBN10(fl FieldLevel) bool { func isISSN(fl FieldLevel) bool { s := fl.Field().String() - if !iSSNRegex.MatchString(s) { + if !iSSNRegex().MatchString(s) { return false } s = strings.ReplaceAll(s, "-", "") @@ -682,14 +688,14 @@ func isISSN(fl FieldLevel) bool { func isEthereumAddress(fl FieldLevel) bool { address := fl.Field().String() - return ethAddressRegex.MatchString(address) + return ethAddressRegex().MatchString(address) } -// isEthereumAddressChecksum is the validation function for validating if the field's value is a valid checksumed Ethereum address. +// isEthereumAddressChecksum is the validation function for validating if the field's value is a valid checksummed Ethereum address. func isEthereumAddressChecksum(fl FieldLevel) bool { address := fl.Field().String() - if !ethAddressRegex.MatchString(address) { + if !ethAddressRegex().MatchString(address) { return false } // Checksum validation. Reference: https://github.com/ethereum/EIPs/blob/master/EIPS/eip-55.md @@ -715,7 +721,7 @@ func isEthereumAddressChecksum(fl FieldLevel) bool { func isBitcoinAddress(fl FieldLevel) bool { address := fl.Field().String() - if !btcAddressRegex.MatchString(address) { + if !btcAddressRegex().MatchString(address) { return false } @@ -752,7 +758,7 @@ func isBitcoinAddress(fl FieldLevel) bool { func isBitcoinBech32Address(fl FieldLevel) bool { address := fl.Field().String() - if !btcLowerAddressRegexBech32.MatchString(address) && !btcUpperAddressRegexBech32.MatchString(address) { + if !btcLowerAddressRegexBech32().MatchString(address) && !btcUpperAddressRegexBech32().MatchString(address) { return false } @@ -1364,6 +1370,7 @@ func isPostcodeByIso3166Alpha2(fl FieldLevel) bool { field := fl.Field() param := fl.Param() + postcodeRegexInit.Do(initPostcodes) reg, found := postCodeRegexDict[param] if !found { return false @@ -1399,19 +1406,24 @@ func isPostcodeByIso3166Alpha2Field(fl FieldLevel) bool { return reg.MatchString(field.String()) } +// isBase32 is the validation function for validating if the current field's value is a valid base 32. +func isBase32(fl FieldLevel) bool { + return base32Regex().MatchString(fl.Field().String()) +} + // isBase64 is the validation function for validating if the current field's value is a valid base 64. func isBase64(fl FieldLevel) bool { - return base64Regex.MatchString(fl.Field().String()) + return base64Regex().MatchString(fl.Field().String()) } // isBase64URL is the validation function for validating if the current field's value is a valid base64 URL safe string. func isBase64URL(fl FieldLevel) bool { - return base64URLRegex.MatchString(fl.Field().String()) + return base64URLRegex().MatchString(fl.Field().String()) } // isBase64RawURL is the validation function for validating if the current field's value is a valid base64 URL safe string without '=' padding. 
func isBase64RawURL(fl FieldLevel) bool { - return base64RawURLRegex.MatchString(fl.Field().String()) + return base64RawURLRegex().MatchString(fl.Field().String()) } // isURI is the validation function for validating if the current field's value is a valid URI. @@ -1657,42 +1669,42 @@ func isFilePath(fl FieldLevel) bool { // isE164 is the validation function for validating if the current field's value is a valid e.164 formatted phone number. func isE164(fl FieldLevel) bool { - return e164Regex.MatchString(fl.Field().String()) + return e164Regex().MatchString(fl.Field().String()) } // isEmail is the validation function for validating if the current field's value is a valid email address. func isEmail(fl FieldLevel) bool { - return emailRegex.MatchString(fl.Field().String()) + return emailRegex().MatchString(fl.Field().String()) } // isHSLA is the validation function for validating if the current field's value is a valid HSLA color. func isHSLA(fl FieldLevel) bool { - return hslaRegex.MatchString(fl.Field().String()) + return hslaRegex().MatchString(fl.Field().String()) } // isHSL is the validation function for validating if the current field's value is a valid HSL color. func isHSL(fl FieldLevel) bool { - return hslRegex.MatchString(fl.Field().String()) + return hslRegex().MatchString(fl.Field().String()) } // isRGBA is the validation function for validating if the current field's value is a valid RGBA color. func isRGBA(fl FieldLevel) bool { - return rgbaRegex.MatchString(fl.Field().String()) + return rgbaRegex().MatchString(fl.Field().String()) } // isRGB is the validation function for validating if the current field's value is a valid RGB color. func isRGB(fl FieldLevel) bool { - return rgbRegex.MatchString(fl.Field().String()) + return rgbRegex().MatchString(fl.Field().String()) } // isHEXColor is the validation function for validating if the current field's value is a valid HEX color. func isHEXColor(fl FieldLevel) bool { - return hexColorRegex.MatchString(fl.Field().String()) + return hexColorRegex().MatchString(fl.Field().String()) } // isHexadecimal is the validation function for validating if the current field's value is a valid hexadecimal. func isHexadecimal(fl FieldLevel) bool { - return hexadecimalRegex.MatchString(fl.Field().String()) + return hexadecimalRegex().MatchString(fl.Field().String()) } // isNumber is the validation function for validating if the current field's value is a valid number. @@ -1701,7 +1713,7 @@ func isNumber(fl FieldLevel) bool { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64: return true default: - return numberRegex.MatchString(fl.Field().String()) + return numberRegex().MatchString(fl.Field().String()) } } @@ -1711,28 +1723,28 @@ func isNumeric(fl FieldLevel) bool { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64: return true default: - return numericRegex.MatchString(fl.Field().String()) + return numericRegex().MatchString(fl.Field().String()) } } // isAlphanum is the validation function for validating if the current field's value is a valid alphanumeric value. 
func isAlphanum(fl FieldLevel) bool { - return alphaNumericRegex.MatchString(fl.Field().String()) + return alphaNumericRegex().MatchString(fl.Field().String()) } // isAlpha is the validation function for validating if the current field's value is a valid alpha value. func isAlpha(fl FieldLevel) bool { - return alphaRegex.MatchString(fl.Field().String()) + return alphaRegex().MatchString(fl.Field().String()) } // isAlphanumUnicode is the validation function for validating if the current field's value is a valid alphanumeric unicode value. func isAlphanumUnicode(fl FieldLevel) bool { - return alphaUnicodeNumericRegex.MatchString(fl.Field().String()) + return alphaUnicodeNumericRegex().MatchString(fl.Field().String()) } // isAlphaUnicode is the validation function for validating if the current field's value is a valid alpha unicode value. func isAlphaUnicode(fl FieldLevel) bool { - return alphaUnicodeRegex.MatchString(fl.Field().String()) + return alphaUnicodeRegex().MatchString(fl.Field().String()) } // isBoolean is the validation function for validating if the current field's value is a valid boolean value or can be safely converted to a boolean value. @@ -2555,11 +2567,11 @@ func isIP6Addr(fl FieldLevel) bool { } func isHostnameRFC952(fl FieldLevel) bool { - return hostnameRegexRFC952.MatchString(fl.Field().String()) + return hostnameRegexRFC952().MatchString(fl.Field().String()) } func isHostnameRFC1123(fl FieldLevel) bool { - return hostnameRegexRFC1123.MatchString(fl.Field().String()) + return hostnameRegexRFC1123().MatchString(fl.Field().String()) } func isFQDN(fl FieldLevel) bool { @@ -2569,7 +2581,7 @@ func isFQDN(fl FieldLevel) bool { return false } - return fqdnRegexRFC1123.MatchString(val) + return fqdnRegexRFC1123().MatchString(val) } // isDir is the validation function for validating if the current field's value is a valid existing directory. @@ -2668,7 +2680,7 @@ func isJSON(fl FieldLevel) bool { // isJWT is the validation function for validating if the current field's value is a valid JWT string. func isJWT(fl FieldLevel) bool { - return jWTRegex.MatchString(fl.Field().String()) + return jWTRegex().MatchString(fl.Field().String()) } // isHostnamePort validates a : combination for fields typically used for socket address. @@ -2687,7 +2699,7 @@ func isHostnamePort(fl FieldLevel) bool { // If host is specified, it should match a DNS name if host != "" { - return hostnameRegexRFC1123.MatchString(host) + return hostnameRegexRFC1123().MatchString(host) } return true } @@ -2758,14 +2770,26 @@ func isTimeZone(fl FieldLevel) bool { // isIso3166Alpha2 is the validation function for validating if the current field's value is a valid iso3166-1 alpha-2 country code. func isIso3166Alpha2(fl FieldLevel) bool { - val := fl.Field().String() - return iso3166_1_alpha2[val] + _, ok := iso3166_1_alpha2[fl.Field().String()] + return ok +} + +// isIso3166Alpha2EU is the validation function for validating if the current field's value is a valid iso3166-1 alpha-2 European Union country code. +func isIso3166Alpha2EU(fl FieldLevel) bool { + _, ok := iso3166_1_alpha2_eu[fl.Field().String()] + return ok } // isIso3166Alpha3 is the validation function for validating if the current field's value is a valid iso3166-1 alpha-3 country code. 
func isIso3166Alpha3(fl FieldLevel) bool { - val := fl.Field().String() - return iso3166_1_alpha3[val] + _, ok := iso3166_1_alpha3[fl.Field().String()] + return ok +} + +// isIso3166Alpha3EU is the validation function for validating if the current field's value is a valid iso3166-1 alpha-3 European Union country code. +func isIso3166Alpha3EU(fl FieldLevel) bool { + _, ok := iso3166_1_alpha3_eu[fl.Field().String()] + return ok } // isIso3166AlphaNumeric is the validation function for validating if the current field's value is a valid iso3166-1 alpha-numeric country code. @@ -2787,19 +2811,45 @@ func isIso3166AlphaNumeric(fl FieldLevel) bool { default: panic(fmt.Sprintf("Bad field type %T", field.Interface())) } - return iso3166_1_alpha_numeric[code] + + _, ok := iso3166_1_alpha_numeric[code] + return ok +} + +// isIso3166AlphaNumericEU is the validation function for validating if the current field's value is a valid iso3166-1 alpha-numeric European Union country code. +func isIso3166AlphaNumericEU(fl FieldLevel) bool { + field := fl.Field() + + var code int + switch field.Kind() { + case reflect.String: + i, err := strconv.Atoi(field.String()) + if err != nil { + return false + } + code = i % 1000 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + code = int(field.Int() % 1000) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + code = int(field.Uint() % 1000) + default: + panic(fmt.Sprintf("Bad field type %T", field.Interface())) + } + + _, ok := iso3166_1_alpha_numeric_eu[code] + return ok } // isIso31662 is the validation function for validating if the current field's value is a valid iso3166-2 code. func isIso31662(fl FieldLevel) bool { - val := fl.Field().String() - return iso3166_2[val] + _, ok := iso3166_2[fl.Field().String()] + return ok } // isIso4217 is the validation function for validating if the current field's value is a valid iso4217 currency code. func isIso4217(fl FieldLevel) bool { - val := fl.Field().String() - return iso4217[val] + _, ok := iso4217[fl.Field().String()] + return ok } // isIso4217Numeric is the validation function for validating if the current field's value is a valid iso4217 numeric currency code. 
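The other recurring change in this file swaps the map[string]bool lookup tables for map[string]struct{} sets, replacing "return table[val]" with a comma-ok presence check. A small standalone sketch of that idiom follows (the entries are a tiny hypothetical subset, not the full table from the patch):

package main

import "fmt"

func main() {
	// struct{} carries no payload, so the map stores only keys; membership
	// is tested with the comma-ok form instead of reading a bool value.
	euAlpha2 := map[string]struct{}{
		"DE": {}, "FR": {}, "ES": {},
	}

	_, ok := euAlpha2["DE"]
	fmt.Println(ok) // true: key present

	_, ok = euAlpha2["US"]
	fmt.Println(ok) // false: absent keys are simply not in the set
}

Behaviour matches the old bool maps (a missing key previously read back as false); the struct{} form just makes the set intent explicit and avoids storing a throwaway bool per entry.
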
@@ -2815,7 +2865,9 @@ func isIso4217Numeric(fl FieldLevel) bool { default: panic(fmt.Sprintf("Bad field type %T", field.Interface())) } - return iso4217_numeric[code] + + _, ok := iso4217_numeric[code] + return ok } // isBCP47LanguageTag is the validation function for validating if the current field's value is a valid BCP 47 language tag, as parsed by language.Parse @@ -2834,21 +2886,21 @@ func isBCP47LanguageTag(fl FieldLevel) bool { func isIsoBicFormat(fl FieldLevel) bool { bicString := fl.Field().String() - return bicRegex.MatchString(bicString) + return bicRegex().MatchString(bicString) } // isSemverFormat is the validation function for validating if the current field's value is a valid semver version, defined in Semantic Versioning 2.0.0 func isSemverFormat(fl FieldLevel) bool { semverString := fl.Field().String() - return semverRegex.MatchString(semverString) + return semverRegex().MatchString(semverString) } // isCveFormat is the validation function for validating if the current field's value is a valid cve id, defined in CVE mitre org func isCveFormat(fl FieldLevel) bool { cveString := fl.Field().String() - return cveRegex.MatchString(cveString) + return cveRegex().MatchString(cveString) } // isDnsRFC1035LabelFormat is the validation function @@ -2856,7 +2908,7 @@ func isCveFormat(fl FieldLevel) bool { // a valid dns RFC 1035 label, defined in RFC 1035. func isDnsRFC1035LabelFormat(fl FieldLevel) bool { val := fl.Field().String() - return dnsRegexRFC1035Label.MatchString(val) + return dnsRegexRFC1035Label().MatchString(val) } // digitsHaveLuhnChecksum returns true if and only if the last element of the given digits slice is the Luhn checksum of the previous elements @@ -2882,10 +2934,16 @@ func digitsHaveLuhnChecksum(digits []string) bool { return (sum % 10) == 0 } -// isMongoDB is the validation function for validating if the current field's value is valid mongoDB objectID -func isMongoDB(fl FieldLevel) bool { +// isMongoDBObjectId is the validation function for validating if the current field's value is valid MongoDB ObjectID +func isMongoDBObjectId(fl FieldLevel) bool { + val := fl.Field().String() + return mongodbIdRegex().MatchString(val) +} + +// isMongoDBConnectionString is the validation function for validating if the current field's value is valid MongoDB Connection String +func isMongoDBConnectionString(fl FieldLevel) bool { val := fl.Field().String() - return mongodbRegex.MatchString(val) + return mongodbConnectionRegex().MatchString(val) } // isSpiceDB is the validation function for validating if the current field's value is valid for use with Authzed SpiceDB in the indicated way @@ -2895,11 +2953,11 @@ func isSpiceDB(fl FieldLevel) bool { switch param { case "permission": - return spicedbPermissionRegex.MatchString(val) + return spicedbPermissionRegex().MatchString(val) case "type": - return spicedbTypeRegex.MatchString(val) + return spicedbTypeRegex().MatchString(val) case "id", "": - return spicedbIDRegex.MatchString(val) + return spicedbIDRegex().MatchString(val) } panic("Unrecognized parameter: " + param) @@ -2951,5 +3009,5 @@ func hasLuhnChecksum(fl FieldLevel) bool { // isCron is the validation function for validating if the current field's value is a valid cron expression func isCron(fl FieldLevel) bool { cronString := fl.Field().String() - return cronRegex.MatchString(cronString) + return cronRegex().MatchString(cronString) } diff --git a/vendor/github.com/go-playground/validator/v10/cache.go b/vendor/github.com/go-playground/validator/v10/cache.go index 
b6bdd11a1fb..2063e1b795d 100644 --- a/vendor/github.com/go-playground/validator/v10/cache.go +++ b/vendor/github.com/go-playground/validator/v10/cache.go @@ -294,7 +294,7 @@ func (v *Validate) parseFieldTagsRecursive(tag string, fieldName string, alias s if wrapper, ok := v.validations[current.tag]; ok { current.fn = wrapper.fn - current.runValidationWhenNil = wrapper.runValidatinOnNil + current.runValidationWhenNil = wrapper.runValidationOnNil } else { panic(strings.TrimSpace(fmt.Sprintf(undefinedValidation, current.tag, fieldName))) } diff --git a/vendor/github.com/go-playground/validator/v10/country_codes.go b/vendor/github.com/go-playground/validator/v10/country_codes.go index 0119f0574d9..b5f10d3c113 100644 --- a/vendor/github.com/go-playground/validator/v10/country_codes.go +++ b/vendor/github.com/go-playground/validator/v10/country_codes.go @@ -1,1150 +1,1177 @@ package validator -var iso3166_1_alpha2 = map[string]bool{ +var iso3166_1_alpha2 = map[string]struct{}{ // see: https://www.iso.org/iso-3166-country-codes.html - "AF": true, "AX": true, "AL": true, "DZ": true, "AS": true, - "AD": true, "AO": true, "AI": true, "AQ": true, "AG": true, - "AR": true, "AM": true, "AW": true, "AU": true, "AT": true, - "AZ": true, "BS": true, "BH": true, "BD": true, "BB": true, - "BY": true, "BE": true, "BZ": true, "BJ": true, "BM": true, - "BT": true, "BO": true, "BQ": true, "BA": true, "BW": true, - "BV": true, "BR": true, "IO": true, "BN": true, "BG": true, - "BF": true, "BI": true, "KH": true, "CM": true, "CA": true, - "CV": true, "KY": true, "CF": true, "TD": true, "CL": true, - "CN": true, "CX": true, "CC": true, "CO": true, "KM": true, - "CG": true, "CD": true, "CK": true, "CR": true, "CI": true, - "HR": true, "CU": true, "CW": true, "CY": true, "CZ": true, - "DK": true, "DJ": true, "DM": true, "DO": true, "EC": true, - "EG": true, "SV": true, "GQ": true, "ER": true, "EE": true, - "ET": true, "FK": true, "FO": true, "FJ": true, "FI": true, - "FR": true, "GF": true, "PF": true, "TF": true, "GA": true, - "GM": true, "GE": true, "DE": true, "GH": true, "GI": true, - "GR": true, "GL": true, "GD": true, "GP": true, "GU": true, - "GT": true, "GG": true, "GN": true, "GW": true, "GY": true, - "HT": true, "HM": true, "VA": true, "HN": true, "HK": true, - "HU": true, "IS": true, "IN": true, "ID": true, "IR": true, - "IQ": true, "IE": true, "IM": true, "IL": true, "IT": true, - "JM": true, "JP": true, "JE": true, "JO": true, "KZ": true, - "KE": true, "KI": true, "KP": true, "KR": true, "KW": true, - "KG": true, "LA": true, "LV": true, "LB": true, "LS": true, - "LR": true, "LY": true, "LI": true, "LT": true, "LU": true, - "MO": true, "MK": true, "MG": true, "MW": true, "MY": true, - "MV": true, "ML": true, "MT": true, "MH": true, "MQ": true, - "MR": true, "MU": true, "YT": true, "MX": true, "FM": true, - "MD": true, "MC": true, "MN": true, "ME": true, "MS": true, - "MA": true, "MZ": true, "MM": true, "NA": true, "NR": true, - "NP": true, "NL": true, "NC": true, "NZ": true, "NI": true, - "NE": true, "NG": true, "NU": true, "NF": true, "MP": true, - "NO": true, "OM": true, "PK": true, "PW": true, "PS": true, - "PA": true, "PG": true, "PY": true, "PE": true, "PH": true, - "PN": true, "PL": true, "PT": true, "PR": true, "QA": true, - "RE": true, "RO": true, "RU": true, "RW": true, "BL": true, - "SH": true, "KN": true, "LC": true, "MF": true, "PM": true, - "VC": true, "WS": true, "SM": true, "ST": true, "SA": true, - "SN": true, "RS": true, "SC": true, "SL": true, "SG": true, - "SX": true, "SK": true, "SI": 
true, "SB": true, "SO": true, - "ZA": true, "GS": true, "SS": true, "ES": true, "LK": true, - "SD": true, "SR": true, "SJ": true, "SZ": true, "SE": true, - "CH": true, "SY": true, "TW": true, "TJ": true, "TZ": true, - "TH": true, "TL": true, "TG": true, "TK": true, "TO": true, - "TT": true, "TN": true, "TR": true, "TM": true, "TC": true, - "TV": true, "UG": true, "UA": true, "AE": true, "GB": true, - "US": true, "UM": true, "UY": true, "UZ": true, "VU": true, - "VE": true, "VN": true, "VG": true, "VI": true, "WF": true, - "EH": true, "YE": true, "ZM": true, "ZW": true, "XK": true, + "AF": {}, "AX": {}, "AL": {}, "DZ": {}, "AS": {}, + "AD": {}, "AO": {}, "AI": {}, "AQ": {}, "AG": {}, + "AR": {}, "AM": {}, "AW": {}, "AU": {}, "AT": {}, + "AZ": {}, "BS": {}, "BH": {}, "BD": {}, "BB": {}, + "BY": {}, "BE": {}, "BZ": {}, "BJ": {}, "BM": {}, + "BT": {}, "BO": {}, "BQ": {}, "BA": {}, "BW": {}, + "BV": {}, "BR": {}, "IO": {}, "BN": {}, "BG": {}, + "BF": {}, "BI": {}, "KH": {}, "CM": {}, "CA": {}, + "CV": {}, "KY": {}, "CF": {}, "TD": {}, "CL": {}, + "CN": {}, "CX": {}, "CC": {}, "CO": {}, "KM": {}, + "CG": {}, "CD": {}, "CK": {}, "CR": {}, "CI": {}, + "HR": {}, "CU": {}, "CW": {}, "CY": {}, "CZ": {}, + "DK": {}, "DJ": {}, "DM": {}, "DO": {}, "EC": {}, + "EG": {}, "SV": {}, "GQ": {}, "ER": {}, "EE": {}, + "ET": {}, "FK": {}, "FO": {}, "FJ": {}, "FI": {}, + "FR": {}, "GF": {}, "PF": {}, "TF": {}, "GA": {}, + "GM": {}, "GE": {}, "DE": {}, "GH": {}, "GI": {}, + "GR": {}, "GL": {}, "GD": {}, "GP": {}, "GU": {}, + "GT": {}, "GG": {}, "GN": {}, "GW": {}, "GY": {}, + "HT": {}, "HM": {}, "VA": {}, "HN": {}, "HK": {}, + "HU": {}, "IS": {}, "IN": {}, "ID": {}, "IR": {}, + "IQ": {}, "IE": {}, "IM": {}, "IL": {}, "IT": {}, + "JM": {}, "JP": {}, "JE": {}, "JO": {}, "KZ": {}, + "KE": {}, "KI": {}, "KP": {}, "KR": {}, "KW": {}, + "KG": {}, "LA": {}, "LV": {}, "LB": {}, "LS": {}, + "LR": {}, "LY": {}, "LI": {}, "LT": {}, "LU": {}, + "MO": {}, "MK": {}, "MG": {}, "MW": {}, "MY": {}, + "MV": {}, "ML": {}, "MT": {}, "MH": {}, "MQ": {}, + "MR": {}, "MU": {}, "YT": {}, "MX": {}, "FM": {}, + "MD": {}, "MC": {}, "MN": {}, "ME": {}, "MS": {}, + "MA": {}, "MZ": {}, "MM": {}, "NA": {}, "NR": {}, + "NP": {}, "NL": {}, "NC": {}, "NZ": {}, "NI": {}, + "NE": {}, "NG": {}, "NU": {}, "NF": {}, "MP": {}, + "NO": {}, "OM": {}, "PK": {}, "PW": {}, "PS": {}, + "PA": {}, "PG": {}, "PY": {}, "PE": {}, "PH": {}, + "PN": {}, "PL": {}, "PT": {}, "PR": {}, "QA": {}, + "RE": {}, "RO": {}, "RU": {}, "RW": {}, "BL": {}, + "SH": {}, "KN": {}, "LC": {}, "MF": {}, "PM": {}, + "VC": {}, "WS": {}, "SM": {}, "ST": {}, "SA": {}, + "SN": {}, "RS": {}, "SC": {}, "SL": {}, "SG": {}, + "SX": {}, "SK": {}, "SI": {}, "SB": {}, "SO": {}, + "ZA": {}, "GS": {}, "SS": {}, "ES": {}, "LK": {}, + "SD": {}, "SR": {}, "SJ": {}, "SZ": {}, "SE": {}, + "CH": {}, "SY": {}, "TW": {}, "TJ": {}, "TZ": {}, + "TH": {}, "TL": {}, "TG": {}, "TK": {}, "TO": {}, + "TT": {}, "TN": {}, "TR": {}, "TM": {}, "TC": {}, + "TV": {}, "UG": {}, "UA": {}, "AE": {}, "GB": {}, + "US": {}, "UM": {}, "UY": {}, "UZ": {}, "VU": {}, + "VE": {}, "VN": {}, "VG": {}, "VI": {}, "WF": {}, + "EH": {}, "YE": {}, "ZM": {}, "ZW": {}, "XK": {}, } -var iso3166_1_alpha3 = map[string]bool{ +var iso3166_1_alpha2_eu = map[string]struct{}{ + "AT": {}, "BE": {}, "BG": {}, "HR": {}, "CY": {}, + "CZ": {}, "DK": {}, "EE": {}, "FI": {}, "FR": {}, + "DE": {}, "GR": {}, "HU": {}, "IE": {}, "IT": {}, + "LV": {}, "LT": {}, "LU": {}, "MT": {}, "NL": {}, + "PL": {}, "PT": {}, "RO": {}, "SK": {}, "SI": {}, + "ES": {}, 
"SE": {}, +} + +var iso3166_1_alpha3 = map[string]struct{}{ // see: https://www.iso.org/iso-3166-country-codes.html - "AFG": true, "ALB": true, "DZA": true, "ASM": true, "AND": true, - "AGO": true, "AIA": true, "ATA": true, "ATG": true, "ARG": true, - "ARM": true, "ABW": true, "AUS": true, "AUT": true, "AZE": true, - "BHS": true, "BHR": true, "BGD": true, "BRB": true, "BLR": true, - "BEL": true, "BLZ": true, "BEN": true, "BMU": true, "BTN": true, - "BOL": true, "BES": true, "BIH": true, "BWA": true, "BVT": true, - "BRA": true, "IOT": true, "BRN": true, "BGR": true, "BFA": true, - "BDI": true, "CPV": true, "KHM": true, "CMR": true, "CAN": true, - "CYM": true, "CAF": true, "TCD": true, "CHL": true, "CHN": true, - "CXR": true, "CCK": true, "COL": true, "COM": true, "COD": true, - "COG": true, "COK": true, "CRI": true, "HRV": true, "CUB": true, - "CUW": true, "CYP": true, "CZE": true, "CIV": true, "DNK": true, - "DJI": true, "DMA": true, "DOM": true, "ECU": true, "EGY": true, - "SLV": true, "GNQ": true, "ERI": true, "EST": true, "SWZ": true, - "ETH": true, "FLK": true, "FRO": true, "FJI": true, "FIN": true, - "FRA": true, "GUF": true, "PYF": true, "ATF": true, "GAB": true, - "GMB": true, "GEO": true, "DEU": true, "GHA": true, "GIB": true, - "GRC": true, "GRL": true, "GRD": true, "GLP": true, "GUM": true, - "GTM": true, "GGY": true, "GIN": true, "GNB": true, "GUY": true, - "HTI": true, "HMD": true, "VAT": true, "HND": true, "HKG": true, - "HUN": true, "ISL": true, "IND": true, "IDN": true, "IRN": true, - "IRQ": true, "IRL": true, "IMN": true, "ISR": true, "ITA": true, - "JAM": true, "JPN": true, "JEY": true, "JOR": true, "KAZ": true, - "KEN": true, "KIR": true, "PRK": true, "KOR": true, "KWT": true, - "KGZ": true, "LAO": true, "LVA": true, "LBN": true, "LSO": true, - "LBR": true, "LBY": true, "LIE": true, "LTU": true, "LUX": true, - "MAC": true, "MDG": true, "MWI": true, "MYS": true, "MDV": true, - "MLI": true, "MLT": true, "MHL": true, "MTQ": true, "MRT": true, - "MUS": true, "MYT": true, "MEX": true, "FSM": true, "MDA": true, - "MCO": true, "MNG": true, "MNE": true, "MSR": true, "MAR": true, - "MOZ": true, "MMR": true, "NAM": true, "NRU": true, "NPL": true, - "NLD": true, "NCL": true, "NZL": true, "NIC": true, "NER": true, - "NGA": true, "NIU": true, "NFK": true, "MKD": true, "MNP": true, - "NOR": true, "OMN": true, "PAK": true, "PLW": true, "PSE": true, - "PAN": true, "PNG": true, "PRY": true, "PER": true, "PHL": true, - "PCN": true, "POL": true, "PRT": true, "PRI": true, "QAT": true, - "ROU": true, "RUS": true, "RWA": true, "REU": true, "BLM": true, - "SHN": true, "KNA": true, "LCA": true, "MAF": true, "SPM": true, - "VCT": true, "WSM": true, "SMR": true, "STP": true, "SAU": true, - "SEN": true, "SRB": true, "SYC": true, "SLE": true, "SGP": true, - "SXM": true, "SVK": true, "SVN": true, "SLB": true, "SOM": true, - "ZAF": true, "SGS": true, "SSD": true, "ESP": true, "LKA": true, - "SDN": true, "SUR": true, "SJM": true, "SWE": true, "CHE": true, - "SYR": true, "TWN": true, "TJK": true, "TZA": true, "THA": true, - "TLS": true, "TGO": true, "TKL": true, "TON": true, "TTO": true, - "TUN": true, "TUR": true, "TKM": true, "TCA": true, "TUV": true, - "UGA": true, "UKR": true, "ARE": true, "GBR": true, "UMI": true, - "USA": true, "URY": true, "UZB": true, "VUT": true, "VEN": true, - "VNM": true, "VGB": true, "VIR": true, "WLF": true, "ESH": true, - "YEM": true, "ZMB": true, "ZWE": true, "ALA": true, "UNK": true, + "AFG": {}, "ALB": {}, "DZA": {}, "ASM": {}, "AND": {}, + "AGO": {}, "AIA": {}, "ATA": 
{}, "ATG": {}, "ARG": {}, + "ARM": {}, "ABW": {}, "AUS": {}, "AUT": {}, "AZE": {}, + "BHS": {}, "BHR": {}, "BGD": {}, "BRB": {}, "BLR": {}, + "BEL": {}, "BLZ": {}, "BEN": {}, "BMU": {}, "BTN": {}, + "BOL": {}, "BES": {}, "BIH": {}, "BWA": {}, "BVT": {}, + "BRA": {}, "IOT": {}, "BRN": {}, "BGR": {}, "BFA": {}, + "BDI": {}, "CPV": {}, "KHM": {}, "CMR": {}, "CAN": {}, + "CYM": {}, "CAF": {}, "TCD": {}, "CHL": {}, "CHN": {}, + "CXR": {}, "CCK": {}, "COL": {}, "COM": {}, "COD": {}, + "COG": {}, "COK": {}, "CRI": {}, "HRV": {}, "CUB": {}, + "CUW": {}, "CYP": {}, "CZE": {}, "CIV": {}, "DNK": {}, + "DJI": {}, "DMA": {}, "DOM": {}, "ECU": {}, "EGY": {}, + "SLV": {}, "GNQ": {}, "ERI": {}, "EST": {}, "SWZ": {}, + "ETH": {}, "FLK": {}, "FRO": {}, "FJI": {}, "FIN": {}, + "FRA": {}, "GUF": {}, "PYF": {}, "ATF": {}, "GAB": {}, + "GMB": {}, "GEO": {}, "DEU": {}, "GHA": {}, "GIB": {}, + "GRC": {}, "GRL": {}, "GRD": {}, "GLP": {}, "GUM": {}, + "GTM": {}, "GGY": {}, "GIN": {}, "GNB": {}, "GUY": {}, + "HTI": {}, "HMD": {}, "VAT": {}, "HND": {}, "HKG": {}, + "HUN": {}, "ISL": {}, "IND": {}, "IDN": {}, "IRN": {}, + "IRQ": {}, "IRL": {}, "IMN": {}, "ISR": {}, "ITA": {}, + "JAM": {}, "JPN": {}, "JEY": {}, "JOR": {}, "KAZ": {}, + "KEN": {}, "KIR": {}, "PRK": {}, "KOR": {}, "KWT": {}, + "KGZ": {}, "LAO": {}, "LVA": {}, "LBN": {}, "LSO": {}, + "LBR": {}, "LBY": {}, "LIE": {}, "LTU": {}, "LUX": {}, + "MAC": {}, "MDG": {}, "MWI": {}, "MYS": {}, "MDV": {}, + "MLI": {}, "MLT": {}, "MHL": {}, "MTQ": {}, "MRT": {}, + "MUS": {}, "MYT": {}, "MEX": {}, "FSM": {}, "MDA": {}, + "MCO": {}, "MNG": {}, "MNE": {}, "MSR": {}, "MAR": {}, + "MOZ": {}, "MMR": {}, "NAM": {}, "NRU": {}, "NPL": {}, + "NLD": {}, "NCL": {}, "NZL": {}, "NIC": {}, "NER": {}, + "NGA": {}, "NIU": {}, "NFK": {}, "MKD": {}, "MNP": {}, + "NOR": {}, "OMN": {}, "PAK": {}, "PLW": {}, "PSE": {}, + "PAN": {}, "PNG": {}, "PRY": {}, "PER": {}, "PHL": {}, + "PCN": {}, "POL": {}, "PRT": {}, "PRI": {}, "QAT": {}, + "ROU": {}, "RUS": {}, "RWA": {}, "REU": {}, "BLM": {}, + "SHN": {}, "KNA": {}, "LCA": {}, "MAF": {}, "SPM": {}, + "VCT": {}, "WSM": {}, "SMR": {}, "STP": {}, "SAU": {}, + "SEN": {}, "SRB": {}, "SYC": {}, "SLE": {}, "SGP": {}, + "SXM": {}, "SVK": {}, "SVN": {}, "SLB": {}, "SOM": {}, + "ZAF": {}, "SGS": {}, "SSD": {}, "ESP": {}, "LKA": {}, + "SDN": {}, "SUR": {}, "SJM": {}, "SWE": {}, "CHE": {}, + "SYR": {}, "TWN": {}, "TJK": {}, "TZA": {}, "THA": {}, + "TLS": {}, "TGO": {}, "TKL": {}, "TON": {}, "TTO": {}, + "TUN": {}, "TUR": {}, "TKM": {}, "TCA": {}, "TUV": {}, + "UGA": {}, "UKR": {}, "ARE": {}, "GBR": {}, "UMI": {}, + "USA": {}, "URY": {}, "UZB": {}, "VUT": {}, "VEN": {}, + "VNM": {}, "VGB": {}, "VIR": {}, "WLF": {}, "ESH": {}, + "YEM": {}, "ZMB": {}, "ZWE": {}, "ALA": {}, "UNK": {}, +} + +var iso3166_1_alpha3_eu = map[string]struct{}{ + "AUT": {}, "BEL": {}, "BGR": {}, "HRV": {}, "CYP": {}, + "CZE": {}, "DNK": {}, "EST": {}, "FIN": {}, "FRA": {}, + "DEU": {}, "GRC": {}, "HUN": {}, "IRL": {}, "ITA": {}, + "LVA": {}, "LTU": {}, "LUX": {}, "MLT": {}, "NLD": {}, + "POL": {}, "PRT": {}, "ROU": {}, "SVK": {}, "SVN": {}, + "ESP": {}, "SWE": {}, } -var iso3166_1_alpha_numeric = map[int]bool{ +var iso3166_1_alpha_numeric = map[int]struct{}{ // see: https://www.iso.org/iso-3166-country-codes.html - 4: true, 8: true, 12: true, 16: true, 20: true, - 24: true, 660: true, 10: true, 28: true, 32: true, - 51: true, 533: true, 36: true, 40: true, 31: true, - 44: true, 48: true, 50: true, 52: true, 112: true, - 56: true, 84: true, 204: true, 60: true, 64: true, - 68: true, 535: 
true, 70: true, 72: true, 74: true, - 76: true, 86: true, 96: true, 100: true, 854: true, - 108: true, 132: true, 116: true, 120: true, 124: true, - 136: true, 140: true, 148: true, 152: true, 156: true, - 162: true, 166: true, 170: true, 174: true, 180: true, - 178: true, 184: true, 188: true, 191: true, 192: true, - 531: true, 196: true, 203: true, 384: true, 208: true, - 262: true, 212: true, 214: true, 218: true, 818: true, - 222: true, 226: true, 232: true, 233: true, 748: true, - 231: true, 238: true, 234: true, 242: true, 246: true, - 250: true, 254: true, 258: true, 260: true, 266: true, - 270: true, 268: true, 276: true, 288: true, 292: true, - 300: true, 304: true, 308: true, 312: true, 316: true, - 320: true, 831: true, 324: true, 624: true, 328: true, - 332: true, 334: true, 336: true, 340: true, 344: true, - 348: true, 352: true, 356: true, 360: true, 364: true, - 368: true, 372: true, 833: true, 376: true, 380: true, - 388: true, 392: true, 832: true, 400: true, 398: true, - 404: true, 296: true, 408: true, 410: true, 414: true, - 417: true, 418: true, 428: true, 422: true, 426: true, - 430: true, 434: true, 438: true, 440: true, 442: true, - 446: true, 450: true, 454: true, 458: true, 462: true, - 466: true, 470: true, 584: true, 474: true, 478: true, - 480: true, 175: true, 484: true, 583: true, 498: true, - 492: true, 496: true, 499: true, 500: true, 504: true, - 508: true, 104: true, 516: true, 520: true, 524: true, - 528: true, 540: true, 554: true, 558: true, 562: true, - 566: true, 570: true, 574: true, 807: true, 580: true, - 578: true, 512: true, 586: true, 585: true, 275: true, - 591: true, 598: true, 600: true, 604: true, 608: true, - 612: true, 616: true, 620: true, 630: true, 634: true, - 642: true, 643: true, 646: true, 638: true, 652: true, - 654: true, 659: true, 662: true, 663: true, 666: true, - 670: true, 882: true, 674: true, 678: true, 682: true, - 686: true, 688: true, 690: true, 694: true, 702: true, - 534: true, 703: true, 705: true, 90: true, 706: true, - 710: true, 239: true, 728: true, 724: true, 144: true, - 729: true, 740: true, 744: true, 752: true, 756: true, - 760: true, 158: true, 762: true, 834: true, 764: true, - 626: true, 768: true, 772: true, 776: true, 780: true, - 788: true, 792: true, 795: true, 796: true, 798: true, - 800: true, 804: true, 784: true, 826: true, 581: true, - 840: true, 858: true, 860: true, 548: true, 862: true, - 704: true, 92: true, 850: true, 876: true, 732: true, - 887: true, 894: true, 716: true, 248: true, 153: true, + 4: {}, 8: {}, 12: {}, 16: {}, 20: {}, + 24: {}, 660: {}, 10: {}, 28: {}, 32: {}, + 51: {}, 533: {}, 36: {}, 40: {}, 31: {}, + 44: {}, 48: {}, 50: {}, 52: {}, 112: {}, + 56: {}, 84: {}, 204: {}, 60: {}, 64: {}, + 68: {}, 535: {}, 70: {}, 72: {}, 74: {}, + 76: {}, 86: {}, 96: {}, 100: {}, 854: {}, + 108: {}, 132: {}, 116: {}, 120: {}, 124: {}, + 136: {}, 140: {}, 148: {}, 152: {}, 156: {}, + 162: {}, 166: {}, 170: {}, 174: {}, 180: {}, + 178: {}, 184: {}, 188: {}, 191: {}, 192: {}, + 531: {}, 196: {}, 203: {}, 384: {}, 208: {}, + 262: {}, 212: {}, 214: {}, 218: {}, 818: {}, + 222: {}, 226: {}, 232: {}, 233: {}, 748: {}, + 231: {}, 238: {}, 234: {}, 242: {}, 246: {}, + 250: {}, 254: {}, 258: {}, 260: {}, 266: {}, + 270: {}, 268: {}, 276: {}, 288: {}, 292: {}, + 300: {}, 304: {}, 308: {}, 312: {}, 316: {}, + 320: {}, 831: {}, 324: {}, 624: {}, 328: {}, + 332: {}, 334: {}, 336: {}, 340: {}, 344: {}, + 348: {}, 352: {}, 356: {}, 360: {}, 364: {}, + 368: {}, 372: {}, 833: {}, 376: {}, 380: {}, + 388: {}, 
392: {}, 832: {}, 400: {}, 398: {}, + 404: {}, 296: {}, 408: {}, 410: {}, 414: {}, + 417: {}, 418: {}, 428: {}, 422: {}, 426: {}, + 430: {}, 434: {}, 438: {}, 440: {}, 442: {}, + 446: {}, 450: {}, 454: {}, 458: {}, 462: {}, + 466: {}, 470: {}, 584: {}, 474: {}, 478: {}, + 480: {}, 175: {}, 484: {}, 583: {}, 498: {}, + 492: {}, 496: {}, 499: {}, 500: {}, 504: {}, + 508: {}, 104: {}, 516: {}, 520: {}, 524: {}, + 528: {}, 540: {}, 554: {}, 558: {}, 562: {}, + 566: {}, 570: {}, 574: {}, 807: {}, 580: {}, + 578: {}, 512: {}, 586: {}, 585: {}, 275: {}, + 591: {}, 598: {}, 600: {}, 604: {}, 608: {}, + 612: {}, 616: {}, 620: {}, 630: {}, 634: {}, + 642: {}, 643: {}, 646: {}, 638: {}, 652: {}, + 654: {}, 659: {}, 662: {}, 663: {}, 666: {}, + 670: {}, 882: {}, 674: {}, 678: {}, 682: {}, + 686: {}, 688: {}, 690: {}, 694: {}, 702: {}, + 534: {}, 703: {}, 705: {}, 90: {}, 706: {}, + 710: {}, 239: {}, 728: {}, 724: {}, 144: {}, + 729: {}, 740: {}, 744: {}, 752: {}, 756: {}, + 760: {}, 158: {}, 762: {}, 834: {}, 764: {}, + 626: {}, 768: {}, 772: {}, 776: {}, 780: {}, + 788: {}, 792: {}, 795: {}, 796: {}, 798: {}, + 800: {}, 804: {}, 784: {}, 826: {}, 581: {}, + 840: {}, 858: {}, 860: {}, 548: {}, 862: {}, + 704: {}, 92: {}, 850: {}, 876: {}, 732: {}, + 887: {}, 894: {}, 716: {}, 248: {}, 153: {}, +} + +var iso3166_1_alpha_numeric_eu = map[int]struct{}{ + 40: {}, 56: {}, 100: {}, 191: {}, 196: {}, + 200: {}, 208: {}, 233: {}, 246: {}, 250: {}, + 276: {}, 300: {}, 348: {}, 372: {}, 380: {}, + 428: {}, 440: {}, 442: {}, 470: {}, 528: {}, + 616: {}, 620: {}, 642: {}, 703: {}, 705: {}, + 724: {}, 752: {}, } -var iso3166_2 = map[string]bool{ - "AD-02": true, "AD-03": true, "AD-04": true, "AD-05": true, "AD-06": true, - "AD-07": true, "AD-08": true, "AE-AJ": true, "AE-AZ": true, "AE-DU": true, - "AE-FU": true, "AE-RK": true, "AE-SH": true, "AE-UQ": true, "AF-BAL": true, - "AF-BAM": true, "AF-BDG": true, "AF-BDS": true, "AF-BGL": true, "AF-DAY": true, - "AF-FRA": true, "AF-FYB": true, "AF-GHA": true, "AF-GHO": true, "AF-HEL": true, - "AF-HER": true, "AF-JOW": true, "AF-KAB": true, "AF-KAN": true, "AF-KAP": true, - "AF-KDZ": true, "AF-KHO": true, "AF-KNR": true, "AF-LAG": true, "AF-LOG": true, - "AF-NAN": true, "AF-NIM": true, "AF-NUR": true, "AF-PAN": true, "AF-PAR": true, - "AF-PIA": true, "AF-PKA": true, "AF-SAM": true, "AF-SAR": true, "AF-TAK": true, - "AF-URU": true, "AF-WAR": true, "AF-ZAB": true, "AG-03": true, "AG-04": true, - "AG-05": true, "AG-06": true, "AG-07": true, "AG-08": true, "AG-10": true, - "AG-11": true, "AL-01": true, "AL-02": true, "AL-03": true, "AL-04": true, - "AL-05": true, "AL-06": true, "AL-07": true, "AL-08": true, "AL-09": true, - "AL-10": true, "AL-11": true, "AL-12": true, "AL-BR": true, "AL-BU": true, - "AL-DI": true, "AL-DL": true, "AL-DR": true, "AL-DV": true, "AL-EL": true, - "AL-ER": true, "AL-FR": true, "AL-GJ": true, "AL-GR": true, "AL-HA": true, - "AL-KA": true, "AL-KB": true, "AL-KC": true, "AL-KO": true, "AL-KR": true, - "AL-KU": true, "AL-LB": true, "AL-LE": true, "AL-LU": true, "AL-MK": true, - "AL-MM": true, "AL-MR": true, "AL-MT": true, "AL-PG": true, "AL-PQ": true, - "AL-PR": true, "AL-PU": true, "AL-SH": true, "AL-SK": true, "AL-SR": true, - "AL-TE": true, "AL-TP": true, "AL-TR": true, "AL-VL": true, "AM-AG": true, - "AM-AR": true, "AM-AV": true, "AM-ER": true, "AM-GR": true, "AM-KT": true, - "AM-LO": true, "AM-SH": true, "AM-SU": true, "AM-TV": true, "AM-VD": true, - "AO-BGO": true, "AO-BGU": true, "AO-BIE": true, "AO-CAB": true, "AO-CCU": true, - "AO-CNN": true, 
"AO-CNO": true, "AO-CUS": true, "AO-HUA": true, "AO-HUI": true, - "AO-LNO": true, "AO-LSU": true, "AO-LUA": true, "AO-MAL": true, "AO-MOX": true, - "AO-NAM": true, "AO-UIG": true, "AO-ZAI": true, "AR-A": true, "AR-B": true, - "AR-C": true, "AR-D": true, "AR-E": true, "AR-F": true, "AR-G": true, "AR-H": true, - "AR-J": true, "AR-K": true, "AR-L": true, "AR-M": true, "AR-N": true, - "AR-P": true, "AR-Q": true, "AR-R": true, "AR-S": true, "AR-T": true, - "AR-U": true, "AR-V": true, "AR-W": true, "AR-X": true, "AR-Y": true, - "AR-Z": true, "AT-1": true, "AT-2": true, "AT-3": true, "AT-4": true, - "AT-5": true, "AT-6": true, "AT-7": true, "AT-8": true, "AT-9": true, - "AU-ACT": true, "AU-NSW": true, "AU-NT": true, "AU-QLD": true, "AU-SA": true, - "AU-TAS": true, "AU-VIC": true, "AU-WA": true, "AZ-ABS": true, "AZ-AGA": true, - "AZ-AGC": true, "AZ-AGM": true, "AZ-AGS": true, "AZ-AGU": true, "AZ-AST": true, - "AZ-BA": true, "AZ-BAB": true, "AZ-BAL": true, "AZ-BAR": true, "AZ-BEY": true, - "AZ-BIL": true, "AZ-CAB": true, "AZ-CAL": true, "AZ-CUL": true, "AZ-DAS": true, - "AZ-FUZ": true, "AZ-GA": true, "AZ-GAD": true, "AZ-GOR": true, "AZ-GOY": true, - "AZ-GYG": true, "AZ-HAC": true, "AZ-IMI": true, "AZ-ISM": true, "AZ-KAL": true, - "AZ-KAN": true, "AZ-KUR": true, "AZ-LA": true, "AZ-LAC": true, "AZ-LAN": true, - "AZ-LER": true, "AZ-MAS": true, "AZ-MI": true, "AZ-NA": true, "AZ-NEF": true, - "AZ-NV": true, "AZ-NX": true, "AZ-OGU": true, "AZ-ORD": true, "AZ-QAB": true, - "AZ-QAX": true, "AZ-QAZ": true, "AZ-QBA": true, "AZ-QBI": true, "AZ-QOB": true, - "AZ-QUS": true, "AZ-SA": true, "AZ-SAB": true, "AZ-SAD": true, "AZ-SAH": true, - "AZ-SAK": true, "AZ-SAL": true, "AZ-SAR": true, "AZ-SAT": true, "AZ-SBN": true, - "AZ-SIY": true, "AZ-SKR": true, "AZ-SM": true, "AZ-SMI": true, "AZ-SMX": true, - "AZ-SR": true, "AZ-SUS": true, "AZ-TAR": true, "AZ-TOV": true, "AZ-UCA": true, - "AZ-XA": true, "AZ-XAC": true, "AZ-XCI": true, "AZ-XIZ": true, "AZ-XVD": true, - "AZ-YAR": true, "AZ-YE": true, "AZ-YEV": true, "AZ-ZAN": true, "AZ-ZAQ": true, - "AZ-ZAR": true, "BA-01": true, "BA-02": true, "BA-03": true, "BA-04": true, - "BA-05": true, "BA-06": true, "BA-07": true, "BA-08": true, "BA-09": true, - "BA-10": true, "BA-BIH": true, "BA-BRC": true, "BA-SRP": true, "BB-01": true, - "BB-02": true, "BB-03": true, "BB-04": true, "BB-05": true, "BB-06": true, - "BB-07": true, "BB-08": true, "BB-09": true, "BB-10": true, "BB-11": true, - "BD-01": true, "BD-02": true, "BD-03": true, "BD-04": true, "BD-05": true, - "BD-06": true, "BD-07": true, "BD-08": true, "BD-09": true, "BD-10": true, - "BD-11": true, "BD-12": true, "BD-13": true, "BD-14": true, "BD-15": true, - "BD-16": true, "BD-17": true, "BD-18": true, "BD-19": true, "BD-20": true, - "BD-21": true, "BD-22": true, "BD-23": true, "BD-24": true, "BD-25": true, - "BD-26": true, "BD-27": true, "BD-28": true, "BD-29": true, "BD-30": true, - "BD-31": true, "BD-32": true, "BD-33": true, "BD-34": true, "BD-35": true, - "BD-36": true, "BD-37": true, "BD-38": true, "BD-39": true, "BD-40": true, - "BD-41": true, "BD-42": true, "BD-43": true, "BD-44": true, "BD-45": true, - "BD-46": true, "BD-47": true, "BD-48": true, "BD-49": true, "BD-50": true, - "BD-51": true, "BD-52": true, "BD-53": true, "BD-54": true, "BD-55": true, - "BD-56": true, "BD-57": true, "BD-58": true, "BD-59": true, "BD-60": true, - "BD-61": true, "BD-62": true, "BD-63": true, "BD-64": true, "BD-A": true, - "BD-B": true, "BD-C": true, "BD-D": true, "BD-E": true, "BD-F": true, - "BD-G": true, "BE-BRU": true, "BE-VAN": 
true, "BE-VBR": true, "BE-VLG": true, - "BE-VLI": true, "BE-VOV": true, "BE-VWV": true, "BE-WAL": true, "BE-WBR": true, - "BE-WHT": true, "BE-WLG": true, "BE-WLX": true, "BE-WNA": true, "BF-01": true, - "BF-02": true, "BF-03": true, "BF-04": true, "BF-05": true, "BF-06": true, - "BF-07": true, "BF-08": true, "BF-09": true, "BF-10": true, "BF-11": true, - "BF-12": true, "BF-13": true, "BF-BAL": true, "BF-BAM": true, "BF-BAN": true, - "BF-BAZ": true, "BF-BGR": true, "BF-BLG": true, "BF-BLK": true, "BF-COM": true, - "BF-GAN": true, "BF-GNA": true, "BF-GOU": true, "BF-HOU": true, "BF-IOB": true, - "BF-KAD": true, "BF-KEN": true, "BF-KMD": true, "BF-KMP": true, "BF-KOP": true, - "BF-KOS": true, "BF-KOT": true, "BF-KOW": true, "BF-LER": true, "BF-LOR": true, - "BF-MOU": true, "BF-NAM": true, "BF-NAO": true, "BF-NAY": true, "BF-NOU": true, - "BF-OUB": true, "BF-OUD": true, "BF-PAS": true, "BF-PON": true, "BF-SEN": true, - "BF-SIS": true, "BF-SMT": true, "BF-SNG": true, "BF-SOM": true, "BF-SOR": true, - "BF-TAP": true, "BF-TUI": true, "BF-YAG": true, "BF-YAT": true, "BF-ZIR": true, - "BF-ZON": true, "BF-ZOU": true, "BG-01": true, "BG-02": true, "BG-03": true, - "BG-04": true, "BG-05": true, "BG-06": true, "BG-07": true, "BG-08": true, - "BG-09": true, "BG-10": true, "BG-11": true, "BG-12": true, "BG-13": true, - "BG-14": true, "BG-15": true, "BG-16": true, "BG-17": true, "BG-18": true, - "BG-19": true, "BG-20": true, "BG-21": true, "BG-22": true, "BG-23": true, - "BG-24": true, "BG-25": true, "BG-26": true, "BG-27": true, "BG-28": true, - "BH-13": true, "BH-14": true, "BH-15": true, "BH-16": true, "BH-17": true, - "BI-BB": true, "BI-BL": true, "BI-BM": true, "BI-BR": true, "BI-CA": true, - "BI-CI": true, "BI-GI": true, "BI-KI": true, "BI-KR": true, "BI-KY": true, - "BI-MA": true, "BI-MU": true, "BI-MW": true, "BI-NG": true, "BI-RM": true, "BI-RT": true, - "BI-RY": true, "BJ-AK": true, "BJ-AL": true, "BJ-AQ": true, "BJ-BO": true, - "BJ-CO": true, "BJ-DO": true, "BJ-KO": true, "BJ-LI": true, "BJ-MO": true, - "BJ-OU": true, "BJ-PL": true, "BJ-ZO": true, "BN-BE": true, "BN-BM": true, - "BN-TE": true, "BN-TU": true, "BO-B": true, "BO-C": true, "BO-H": true, - "BO-L": true, "BO-N": true, "BO-O": true, "BO-P": true, "BO-S": true, - "BO-T": true, "BQ-BO": true, "BQ-SA": true, "BQ-SE": true, "BR-AC": true, - "BR-AL": true, "BR-AM": true, "BR-AP": true, "BR-BA": true, "BR-CE": true, - "BR-DF": true, "BR-ES": true, "BR-FN": true, "BR-GO": true, "BR-MA": true, - "BR-MG": true, "BR-MS": true, "BR-MT": true, "BR-PA": true, "BR-PB": true, - "BR-PE": true, "BR-PI": true, "BR-PR": true, "BR-RJ": true, "BR-RN": true, - "BR-RO": true, "BR-RR": true, "BR-RS": true, "BR-SC": true, "BR-SE": true, - "BR-SP": true, "BR-TO": true, "BS-AK": true, "BS-BI": true, "BS-BP": true, - "BS-BY": true, "BS-CE": true, "BS-CI": true, "BS-CK": true, "BS-CO": true, - "BS-CS": true, "BS-EG": true, "BS-EX": true, "BS-FP": true, "BS-GC": true, - "BS-HI": true, "BS-HT": true, "BS-IN": true, "BS-LI": true, "BS-MC": true, - "BS-MG": true, "BS-MI": true, "BS-NE": true, "BS-NO": true, "BS-NP": true, "BS-NS": true, - "BS-RC": true, "BS-RI": true, "BS-SA": true, "BS-SE": true, "BS-SO": true, - "BS-SS": true, "BS-SW": true, "BS-WG": true, "BT-11": true, "BT-12": true, - "BT-13": true, "BT-14": true, "BT-15": true, "BT-21": true, "BT-22": true, - "BT-23": true, "BT-24": true, "BT-31": true, "BT-32": true, "BT-33": true, - "BT-34": true, "BT-41": true, "BT-42": true, "BT-43": true, "BT-44": true, - "BT-45": true, "BT-GA": true, "BT-TY": true, 
"BW-CE": true, "BW-CH": true, "BW-GH": true, - "BW-KG": true, "BW-KL": true, "BW-KW": true, "BW-NE": true, "BW-NW": true, - "BW-SE": true, "BW-SO": true, "BY-BR": true, "BY-HM": true, "BY-HO": true, - "BY-HR": true, "BY-MA": true, "BY-MI": true, "BY-VI": true, "BZ-BZ": true, - "BZ-CY": true, "BZ-CZL": true, "BZ-OW": true, "BZ-SC": true, "BZ-TOL": true, - "CA-AB": true, "CA-BC": true, "CA-MB": true, "CA-NB": true, "CA-NL": true, - "CA-NS": true, "CA-NT": true, "CA-NU": true, "CA-ON": true, "CA-PE": true, - "CA-QC": true, "CA-SK": true, "CA-YT": true, "CD-BC": true, "CD-BN": true, - "CD-EQ": true, "CD-HK": true, "CD-IT": true, "CD-KA": true, "CD-KC": true, "CD-KE": true, "CD-KG": true, "CD-KN": true, - "CD-KW": true, "CD-KS": true, "CD-LU": true, "CD-MA": true, "CD-NK": true, "CD-OR": true, "CD-SA": true, "CD-SK": true, - "CD-TA": true, "CD-TO": true, "CF-AC": true, "CF-BB": true, "CF-BGF": true, "CF-BK": true, "CF-HK": true, "CF-HM": true, - "CF-HS": true, "CF-KB": true, "CF-KG": true, "CF-LB": true, "CF-MB": true, - "CF-MP": true, "CF-NM": true, "CF-OP": true, "CF-SE": true, "CF-UK": true, - "CF-VK": true, "CG-11": true, "CG-12": true, "CG-13": true, "CG-14": true, - "CG-15": true, "CG-16": true, "CG-2": true, "CG-5": true, "CG-7": true, "CG-8": true, - "CG-9": true, "CG-BZV": true, "CH-AG": true, "CH-AI": true, "CH-AR": true, - "CH-BE": true, "CH-BL": true, "CH-BS": true, "CH-FR": true, "CH-GE": true, - "CH-GL": true, "CH-GR": true, "CH-JU": true, "CH-LU": true, "CH-NE": true, - "CH-NW": true, "CH-OW": true, "CH-SG": true, "CH-SH": true, "CH-SO": true, - "CH-SZ": true, "CH-TG": true, "CH-TI": true, "CH-UR": true, "CH-VD": true, - "CH-VS": true, "CH-ZG": true, "CH-ZH": true, "CI-AB": true, "CI-BS": true, - "CI-CM": true, "CI-DN": true, "CI-GD": true, "CI-LC": true, "CI-LG": true, - "CI-MG": true, "CI-SM": true, "CI-SV": true, "CI-VB": true, "CI-WR": true, - "CI-YM": true, "CI-ZZ": true, "CL-AI": true, "CL-AN": true, "CL-AP": true, - "CL-AR": true, "CL-AT": true, "CL-BI": true, "CL-CO": true, "CL-LI": true, - "CL-LL": true, "CL-LR": true, "CL-MA": true, "CL-ML": true, "CL-NB": true, "CL-RM": true, - "CL-TA": true, "CL-VS": true, "CM-AD": true, "CM-CE": true, "CM-EN": true, - "CM-ES": true, "CM-LT": true, "CM-NO": true, "CM-NW": true, "CM-OU": true, - "CM-SU": true, "CM-SW": true, "CN-AH": true, "CN-BJ": true, "CN-CQ": true, - "CN-FJ": true, "CN-GS": true, "CN-GD": true, "CN-GX": true, "CN-GZ": true, - "CN-HI": true, "CN-HE": true, "CN-HL": true, "CN-HA": true, "CN-HB": true, - "CN-HN": true, "CN-JS": true, "CN-JX": true, "CN-JL": true, "CN-LN": true, - "CN-NM": true, "CN-NX": true, "CN-QH": true, "CN-SN": true, "CN-SD": true, "CN-SH": true, - "CN-SX": true, "CN-SC": true, "CN-TJ": true, "CN-XJ": true, "CN-XZ": true, "CN-YN": true, - "CN-ZJ": true, "CO-AMA": true, "CO-ANT": true, "CO-ARA": true, "CO-ATL": true, - "CO-BOL": true, "CO-BOY": true, "CO-CAL": true, "CO-CAQ": true, "CO-CAS": true, - "CO-CAU": true, "CO-CES": true, "CO-CHO": true, "CO-COR": true, "CO-CUN": true, - "CO-DC": true, "CO-GUA": true, "CO-GUV": true, "CO-HUI": true, "CO-LAG": true, - "CO-MAG": true, "CO-MET": true, "CO-NAR": true, "CO-NSA": true, "CO-PUT": true, - "CO-QUI": true, "CO-RIS": true, "CO-SAN": true, "CO-SAP": true, "CO-SUC": true, - "CO-TOL": true, "CO-VAC": true, "CO-VAU": true, "CO-VID": true, "CR-A": true, - "CR-C": true, "CR-G": true, "CR-H": true, "CR-L": true, "CR-P": true, - "CR-SJ": true, "CU-01": true, "CU-02": true, "CU-03": true, "CU-04": true, - "CU-05": true, "CU-06": true, "CU-07": true, "CU-08": 
true, "CU-09": true, - "CU-10": true, "CU-11": true, "CU-12": true, "CU-13": true, "CU-14": true, "CU-15": true, - "CU-16": true, "CU-99": true, "CV-B": true, "CV-BR": true, "CV-BV": true, "CV-CA": true, - "CV-CF": true, "CV-CR": true, "CV-MA": true, "CV-MO": true, "CV-PA": true, - "CV-PN": true, "CV-PR": true, "CV-RB": true, "CV-RG": true, "CV-RS": true, - "CV-S": true, "CV-SD": true, "CV-SF": true, "CV-SL": true, "CV-SM": true, - "CV-SO": true, "CV-SS": true, "CV-SV": true, "CV-TA": true, "CV-TS": true, - "CY-01": true, "CY-02": true, "CY-03": true, "CY-04": true, "CY-05": true, - "CY-06": true, "CZ-10": true, "CZ-101": true, "CZ-102": true, "CZ-103": true, - "CZ-104": true, "CZ-105": true, "CZ-106": true, "CZ-107": true, "CZ-108": true, - "CZ-109": true, "CZ-110": true, "CZ-111": true, "CZ-112": true, "CZ-113": true, - "CZ-114": true, "CZ-115": true, "CZ-116": true, "CZ-117": true, "CZ-118": true, - "CZ-119": true, "CZ-120": true, "CZ-121": true, "CZ-122": true, "CZ-20": true, - "CZ-201": true, "CZ-202": true, "CZ-203": true, "CZ-204": true, "CZ-205": true, - "CZ-206": true, "CZ-207": true, "CZ-208": true, "CZ-209": true, "CZ-20A": true, - "CZ-20B": true, "CZ-20C": true, "CZ-31": true, "CZ-311": true, "CZ-312": true, - "CZ-313": true, "CZ-314": true, "CZ-315": true, "CZ-316": true, "CZ-317": true, - "CZ-32": true, "CZ-321": true, "CZ-322": true, "CZ-323": true, "CZ-324": true, - "CZ-325": true, "CZ-326": true, "CZ-327": true, "CZ-41": true, "CZ-411": true, - "CZ-412": true, "CZ-413": true, "CZ-42": true, "CZ-421": true, "CZ-422": true, - "CZ-423": true, "CZ-424": true, "CZ-425": true, "CZ-426": true, "CZ-427": true, - "CZ-51": true, "CZ-511": true, "CZ-512": true, "CZ-513": true, "CZ-514": true, - "CZ-52": true, "CZ-521": true, "CZ-522": true, "CZ-523": true, "CZ-524": true, - "CZ-525": true, "CZ-53": true, "CZ-531": true, "CZ-532": true, "CZ-533": true, - "CZ-534": true, "CZ-63": true, "CZ-631": true, "CZ-632": true, "CZ-633": true, - "CZ-634": true, "CZ-635": true, "CZ-64": true, "CZ-641": true, "CZ-642": true, - "CZ-643": true, "CZ-644": true, "CZ-645": true, "CZ-646": true, "CZ-647": true, - "CZ-71": true, "CZ-711": true, "CZ-712": true, "CZ-713": true, "CZ-714": true, - "CZ-715": true, "CZ-72": true, "CZ-721": true, "CZ-722": true, "CZ-723": true, - "CZ-724": true, "CZ-80": true, "CZ-801": true, "CZ-802": true, "CZ-803": true, - "CZ-804": true, "CZ-805": true, "CZ-806": true, "DE-BB": true, "DE-BE": true, - "DE-BW": true, "DE-BY": true, "DE-HB": true, "DE-HE": true, "DE-HH": true, - "DE-MV": true, "DE-NI": true, "DE-NW": true, "DE-RP": true, "DE-SH": true, - "DE-SL": true, "DE-SN": true, "DE-ST": true, "DE-TH": true, "DJ-AR": true, - "DJ-AS": true, "DJ-DI": true, "DJ-DJ": true, "DJ-OB": true, "DJ-TA": true, - "DK-81": true, "DK-82": true, "DK-83": true, "DK-84": true, "DK-85": true, - "DM-01": true, "DM-02": true, "DM-03": true, "DM-04": true, "DM-05": true, - "DM-06": true, "DM-07": true, "DM-08": true, "DM-09": true, "DM-10": true, - "DO-01": true, "DO-02": true, "DO-03": true, "DO-04": true, "DO-05": true, - "DO-06": true, "DO-07": true, "DO-08": true, "DO-09": true, "DO-10": true, - "DO-11": true, "DO-12": true, "DO-13": true, "DO-14": true, "DO-15": true, - "DO-16": true, "DO-17": true, "DO-18": true, "DO-19": true, "DO-20": true, - "DO-21": true, "DO-22": true, "DO-23": true, "DO-24": true, "DO-25": true, - "DO-26": true, "DO-27": true, "DO-28": true, "DO-29": true, "DO-30": true, "DO-31": true, - "DZ-01": true, "DZ-02": true, "DZ-03": true, "DZ-04": true, "DZ-05": true, - 
"DZ-06": true, "DZ-07": true, "DZ-08": true, "DZ-09": true, "DZ-10": true, - "DZ-11": true, "DZ-12": true, "DZ-13": true, "DZ-14": true, "DZ-15": true, - "DZ-16": true, "DZ-17": true, "DZ-18": true, "DZ-19": true, "DZ-20": true, - "DZ-21": true, "DZ-22": true, "DZ-23": true, "DZ-24": true, "DZ-25": true, - "DZ-26": true, "DZ-27": true, "DZ-28": true, "DZ-29": true, "DZ-30": true, - "DZ-31": true, "DZ-32": true, "DZ-33": true, "DZ-34": true, "DZ-35": true, - "DZ-36": true, "DZ-37": true, "DZ-38": true, "DZ-39": true, "DZ-40": true, - "DZ-41": true, "DZ-42": true, "DZ-43": true, "DZ-44": true, "DZ-45": true, - "DZ-46": true, "DZ-47": true, "DZ-48": true, "DZ-49": true, "DZ-51": true, - "DZ-53": true, "DZ-55": true, "DZ-56": true, "DZ-57": true, "EC-A": true, "EC-B": true, - "EC-C": true, "EC-D": true, "EC-E": true, "EC-F": true, "EC-G": true, - "EC-H": true, "EC-I": true, "EC-L": true, "EC-M": true, "EC-N": true, - "EC-O": true, "EC-P": true, "EC-R": true, "EC-S": true, "EC-SD": true, - "EC-SE": true, "EC-T": true, "EC-U": true, "EC-W": true, "EC-X": true, - "EC-Y": true, "EC-Z": true, "EE-37": true, "EE-39": true, "EE-44": true, "EE-45": true, - "EE-49": true, "EE-50": true, "EE-51": true, "EE-52": true, "EE-56": true, "EE-57": true, - "EE-59": true, "EE-60": true, "EE-64": true, "EE-65": true, "EE-67": true, "EE-68": true, - "EE-70": true, "EE-71": true, "EE-74": true, "EE-78": true, "EE-79": true, "EE-81": true, "EE-82": true, - "EE-84": true, "EE-86": true, "EE-87": true, "EG-ALX": true, "EG-ASN": true, "EG-AST": true, - "EG-BA": true, "EG-BH": true, "EG-BNS": true, "EG-C": true, "EG-DK": true, - "EG-DT": true, "EG-FYM": true, "EG-GH": true, "EG-GZ": true, "EG-HU": true, - "EG-IS": true, "EG-JS": true, "EG-KB": true, "EG-KFS": true, "EG-KN": true, - "EG-LX": true, "EG-MN": true, "EG-MNF": true, "EG-MT": true, "EG-PTS": true, "EG-SHG": true, - "EG-SHR": true, "EG-SIN": true, "EG-SU": true, "EG-SUZ": true, "EG-WAD": true, - "ER-AN": true, "ER-DK": true, "ER-DU": true, "ER-GB": true, "ER-MA": true, - "ER-SK": true, "ES-A": true, "ES-AB": true, "ES-AL": true, "ES-AN": true, - "ES-AR": true, "ES-AS": true, "ES-AV": true, "ES-B": true, "ES-BA": true, - "ES-BI": true, "ES-BU": true, "ES-C": true, "ES-CA": true, "ES-CB": true, - "ES-CC": true, "ES-CE": true, "ES-CL": true, "ES-CM": true, "ES-CN": true, - "ES-CO": true, "ES-CR": true, "ES-CS": true, "ES-CT": true, "ES-CU": true, - "ES-EX": true, "ES-GA": true, "ES-GC": true, "ES-GI": true, "ES-GR": true, - "ES-GU": true, "ES-H": true, "ES-HU": true, "ES-IB": true, "ES-J": true, - "ES-L": true, "ES-LE": true, "ES-LO": true, "ES-LU": true, "ES-M": true, - "ES-MA": true, "ES-MC": true, "ES-MD": true, "ES-ML": true, "ES-MU": true, - "ES-NA": true, "ES-NC": true, "ES-O": true, "ES-OR": true, "ES-P": true, - "ES-PM": true, "ES-PO": true, "ES-PV": true, "ES-RI": true, "ES-S": true, - "ES-SA": true, "ES-SE": true, "ES-SG": true, "ES-SO": true, "ES-SS": true, - "ES-T": true, "ES-TE": true, "ES-TF": true, "ES-TO": true, "ES-V": true, - "ES-VA": true, "ES-VC": true, "ES-VI": true, "ES-Z": true, "ES-ZA": true, - "ET-AA": true, "ET-AF": true, "ET-AM": true, "ET-BE": true, "ET-DD": true, - "ET-GA": true, "ET-HA": true, "ET-OR": true, "ET-SN": true, "ET-SO": true, - "ET-TI": true, "FI-01": true, "FI-02": true, "FI-03": true, "FI-04": true, - "FI-05": true, "FI-06": true, "FI-07": true, "FI-08": true, "FI-09": true, - "FI-10": true, "FI-11": true, "FI-12": true, "FI-13": true, "FI-14": true, - "FI-15": true, "FI-16": true, "FI-17": true, "FI-18": true, "FI-19": 
true, - "FJ-C": true, "FJ-E": true, "FJ-N": true, "FJ-R": true, "FJ-W": true, - "FM-KSA": true, "FM-PNI": true, "FM-TRK": true, "FM-YAP": true, "FR-01": true, - "FR-02": true, "FR-03": true, "FR-04": true, "FR-05": true, "FR-06": true, - "FR-07": true, "FR-08": true, "FR-09": true, "FR-10": true, "FR-11": true, - "FR-12": true, "FR-13": true, "FR-14": true, "FR-15": true, "FR-16": true, - "FR-17": true, "FR-18": true, "FR-19": true, "FR-20R": true, "FR-21": true, "FR-22": true, - "FR-23": true, "FR-24": true, "FR-25": true, "FR-26": true, "FR-27": true, - "FR-28": true, "FR-29": true, "FR-2A": true, "FR-2B": true, "FR-30": true, - "FR-31": true, "FR-32": true, "FR-33": true, "FR-34": true, "FR-35": true, - "FR-36": true, "FR-37": true, "FR-38": true, "FR-39": true, "FR-40": true, - "FR-41": true, "FR-42": true, "FR-43": true, "FR-44": true, "FR-45": true, - "FR-46": true, "FR-47": true, "FR-48": true, "FR-49": true, "FR-50": true, - "FR-51": true, "FR-52": true, "FR-53": true, "FR-54": true, "FR-55": true, - "FR-56": true, "FR-57": true, "FR-58": true, "FR-59": true, "FR-60": true, - "FR-61": true, "FR-62": true, "FR-63": true, "FR-64": true, "FR-65": true, - "FR-66": true, "FR-67": true, "FR-68": true, "FR-69": true, "FR-70": true, - "FR-71": true, "FR-72": true, "FR-73": true, "FR-74": true, "FR-75": true, - "FR-76": true, "FR-77": true, "FR-78": true, "FR-79": true, "FR-80": true, - "FR-81": true, "FR-82": true, "FR-83": true, "FR-84": true, "FR-85": true, - "FR-86": true, "FR-87": true, "FR-88": true, "FR-89": true, "FR-90": true, - "FR-91": true, "FR-92": true, "FR-93": true, "FR-94": true, "FR-95": true, - "FR-ARA": true, "FR-BFC": true, "FR-BL": true, "FR-BRE": true, "FR-COR": true, - "FR-CP": true, "FR-CVL": true, "FR-GES": true, "FR-GF": true, "FR-GP": true, - "FR-GUA": true, "FR-HDF": true, "FR-IDF": true, "FR-LRE": true, "FR-MAY": true, - "FR-MF": true, "FR-MQ": true, "FR-NAQ": true, "FR-NC": true, "FR-NOR": true, - "FR-OCC": true, "FR-PAC": true, "FR-PDL": true, "FR-PF": true, "FR-PM": true, - "FR-RE": true, "FR-TF": true, "FR-WF": true, "FR-YT": true, "GA-1": true, - "GA-2": true, "GA-3": true, "GA-4": true, "GA-5": true, "GA-6": true, - "GA-7": true, "GA-8": true, "GA-9": true, "GB-ABC": true, "GB-ABD": true, - "GB-ABE": true, "GB-AGB": true, "GB-AGY": true, "GB-AND": true, "GB-ANN": true, - "GB-ANS": true, "GB-BAS": true, "GB-BBD": true, "GB-BDF": true, "GB-BDG": true, - "GB-BEN": true, "GB-BEX": true, "GB-BFS": true, "GB-BGE": true, "GB-BGW": true, - "GB-BIR": true, "GB-BKM": true, "GB-BMH": true, "GB-BNE": true, "GB-BNH": true, - "GB-BNS": true, "GB-BOL": true, "GB-BPL": true, "GB-BRC": true, "GB-BRD": true, - "GB-BRY": true, "GB-BST": true, "GB-BUR": true, "GB-CAM": true, "GB-CAY": true, - "GB-CBF": true, "GB-CCG": true, "GB-CGN": true, "GB-CHE": true, "GB-CHW": true, - "GB-CLD": true, "GB-CLK": true, "GB-CMA": true, "GB-CMD": true, "GB-CMN": true, - "GB-CON": true, "GB-COV": true, "GB-CRF": true, "GB-CRY": true, "GB-CWY": true, - "GB-DAL": true, "GB-DBY": true, "GB-DEN": true, "GB-DER": true, "GB-DEV": true, - "GB-DGY": true, "GB-DNC": true, "GB-DND": true, "GB-DOR": true, "GB-DRS": true, - "GB-DUD": true, "GB-DUR": true, "GB-EAL": true, "GB-EAW": true, "GB-EAY": true, - "GB-EDH": true, "GB-EDU": true, "GB-ELN": true, "GB-ELS": true, "GB-ENF": true, - "GB-ENG": true, "GB-ERW": true, "GB-ERY": true, "GB-ESS": true, "GB-ESX": true, - "GB-FAL": true, "GB-FIF": true, "GB-FLN": true, "GB-FMO": true, "GB-GAT": true, - "GB-GBN": true, "GB-GLG": true, "GB-GLS": true, "GB-GRE": 
true, "GB-GWN": true, - "GB-HAL": true, "GB-HAM": true, "GB-HAV": true, "GB-HCK": true, "GB-HEF": true, - "GB-HIL": true, "GB-HLD": true, "GB-HMF": true, "GB-HNS": true, "GB-HPL": true, - "GB-HRT": true, "GB-HRW": true, "GB-HRY": true, "GB-IOS": true, "GB-IOW": true, - "GB-ISL": true, "GB-IVC": true, "GB-KEC": true, "GB-KEN": true, "GB-KHL": true, - "GB-KIR": true, "GB-KTT": true, "GB-KWL": true, "GB-LAN": true, "GB-LBC": true, - "GB-LBH": true, "GB-LCE": true, "GB-LDS": true, "GB-LEC": true, "GB-LEW": true, - "GB-LIN": true, "GB-LIV": true, "GB-LND": true, "GB-LUT": true, "GB-MAN": true, - "GB-MDB": true, "GB-MDW": true, "GB-MEA": true, "GB-MIK": true, "GD-01": true, - "GB-MLN": true, "GB-MON": true, "GB-MRT": true, "GB-MRY": true, "GB-MTY": true, - "GB-MUL": true, "GB-NAY": true, "GB-NBL": true, "GB-NEL": true, "GB-NET": true, - "GB-NFK": true, "GB-NGM": true, "GB-NIR": true, "GB-NLK": true, "GB-NLN": true, - "GB-NMD": true, "GB-NSM": true, "GB-NTH": true, "GB-NTL": true, "GB-NTT": true, - "GB-NTY": true, "GB-NWM": true, "GB-NWP": true, "GB-NYK": true, "GB-OLD": true, - "GB-ORK": true, "GB-OXF": true, "GB-PEM": true, "GB-PKN": true, "GB-PLY": true, - "GB-POL": true, "GB-POR": true, "GB-POW": true, "GB-PTE": true, "GB-RCC": true, - "GB-RCH": true, "GB-RCT": true, "GB-RDB": true, "GB-RDG": true, "GB-RFW": true, - "GB-RIC": true, "GB-ROT": true, "GB-RUT": true, "GB-SAW": true, "GB-SAY": true, - "GB-SCB": true, "GB-SCT": true, "GB-SFK": true, "GB-SFT": true, "GB-SGC": true, - "GB-SHF": true, "GB-SHN": true, "GB-SHR": true, "GB-SKP": true, "GB-SLF": true, - "GB-SLG": true, "GB-SLK": true, "GB-SND": true, "GB-SOL": true, "GB-SOM": true, - "GB-SOS": true, "GB-SRY": true, "GB-STE": true, "GB-STG": true, "GB-STH": true, - "GB-STN": true, "GB-STS": true, "GB-STT": true, "GB-STY": true, "GB-SWA": true, - "GB-SWD": true, "GB-SWK": true, "GB-TAM": true, "GB-TFW": true, "GB-THR": true, - "GB-TOB": true, "GB-TOF": true, "GB-TRF": true, "GB-TWH": true, "GB-UKM": true, - "GB-VGL": true, "GB-WAR": true, "GB-WBK": true, "GB-WDU": true, "GB-WFT": true, - "GB-WGN": true, "GB-WIL": true, "GB-WKF": true, "GB-WLL": true, "GB-WLN": true, - "GB-WLS": true, "GB-WLV": true, "GB-WND": true, "GB-WNM": true, "GB-WOK": true, - "GB-WOR": true, "GB-WRL": true, "GB-WRT": true, "GB-WRX": true, "GB-WSM": true, - "GB-WSX": true, "GB-YOR": true, "GB-ZET": true, "GD-02": true, "GD-03": true, - "GD-04": true, "GD-05": true, "GD-06": true, "GD-10": true, "GE-AB": true, - "GE-AJ": true, "GE-GU": true, "GE-IM": true, "GE-KA": true, "GE-KK": true, - "GE-MM": true, "GE-RL": true, "GE-SJ": true, "GE-SK": true, "GE-SZ": true, - "GE-TB": true, "GH-AA": true, "GH-AH": true, "GH-AF": true, "GH-BA": true, "GH-BO": true, "GH-BE": true, "GH-CP": true, - "GH-EP": true, "GH-NP": true, "GH-TV": true, "GH-UE": true, "GH-UW": true, - "GH-WP": true, "GL-AV": true, "GL-KU": true, "GL-QA": true, "GL-QT": true, "GL-QE": true, "GL-SM": true, - "GM-B": true, "GM-L": true, "GM-M": true, "GM-N": true, "GM-U": true, - "GM-W": true, "GN-B": true, "GN-BE": true, "GN-BF": true, "GN-BK": true, - "GN-C": true, "GN-CO": true, "GN-D": true, "GN-DB": true, "GN-DI": true, - "GN-DL": true, "GN-DU": true, "GN-F": true, "GN-FA": true, "GN-FO": true, - "GN-FR": true, "GN-GA": true, "GN-GU": true, "GN-K": true, "GN-KA": true, - "GN-KB": true, "GN-KD": true, "GN-KE": true, "GN-KN": true, "GN-KO": true, - "GN-KS": true, "GN-L": true, "GN-LA": true, "GN-LE": true, "GN-LO": true, - "GN-M": true, "GN-MC": true, "GN-MD": true, "GN-ML": true, "GN-MM": true, - "GN-N": true, 
"GN-NZ": true, "GN-PI": true, "GN-SI": true, "GN-TE": true, - "GN-TO": true, "GN-YO": true, "GQ-AN": true, "GQ-BN": true, "GQ-BS": true, - "GQ-C": true, "GQ-CS": true, "GQ-I": true, "GQ-KN": true, "GQ-LI": true, - "GQ-WN": true, "GR-01": true, "GR-03": true, "GR-04": true, "GR-05": true, - "GR-06": true, "GR-07": true, "GR-11": true, "GR-12": true, "GR-13": true, - "GR-14": true, "GR-15": true, "GR-16": true, "GR-17": true, "GR-21": true, - "GR-22": true, "GR-23": true, "GR-24": true, "GR-31": true, "GR-32": true, - "GR-33": true, "GR-34": true, "GR-41": true, "GR-42": true, "GR-43": true, - "GR-44": true, "GR-51": true, "GR-52": true, "GR-53": true, "GR-54": true, - "GR-55": true, "GR-56": true, "GR-57": true, "GR-58": true, "GR-59": true, - "GR-61": true, "GR-62": true, "GR-63": true, "GR-64": true, "GR-69": true, - "GR-71": true, "GR-72": true, "GR-73": true, "GR-81": true, "GR-82": true, - "GR-83": true, "GR-84": true, "GR-85": true, "GR-91": true, "GR-92": true, - "GR-93": true, "GR-94": true, "GR-A": true, "GR-A1": true, "GR-B": true, - "GR-C": true, "GR-D": true, "GR-E": true, "GR-F": true, "GR-G": true, - "GR-H": true, "GR-I": true, "GR-J": true, "GR-K": true, "GR-L": true, - "GR-M": true, "GT-01": true, "GT-02": true, "GT-03": true, "GT-04": true, - "GT-05": true, "GT-06": true, "GT-07": true, "GT-08": true, "GT-09": true, - "GT-10": true, "GT-11": true, "GT-12": true, "GT-13": true, "GT-14": true, - "GT-15": true, "GT-16": true, "GT-17": true, "GT-18": true, "GT-19": true, - "GT-20": true, "GT-21": true, "GT-22": true, "GW-BA": true, "GW-BL": true, - "GW-BM": true, "GW-BS": true, "GW-CA": true, "GW-GA": true, "GW-L": true, - "GW-N": true, "GW-OI": true, "GW-QU": true, "GW-S": true, "GW-TO": true, - "GY-BA": true, "GY-CU": true, "GY-DE": true, "GY-EB": true, "GY-ES": true, - "GY-MA": true, "GY-PM": true, "GY-PT": true, "GY-UD": true, "GY-UT": true, - "HN-AT": true, "HN-CH": true, "HN-CL": true, "HN-CM": true, "HN-CP": true, - "HN-CR": true, "HN-EP": true, "HN-FM": true, "HN-GD": true, "HN-IB": true, - "HN-IN": true, "HN-LE": true, "HN-LP": true, "HN-OC": true, "HN-OL": true, - "HN-SB": true, "HN-VA": true, "HN-YO": true, "HR-01": true, "HR-02": true, - "HR-03": true, "HR-04": true, "HR-05": true, "HR-06": true, "HR-07": true, - "HR-08": true, "HR-09": true, "HR-10": true, "HR-11": true, "HR-12": true, - "HR-13": true, "HR-14": true, "HR-15": true, "HR-16": true, "HR-17": true, - "HR-18": true, "HR-19": true, "HR-20": true, "HR-21": true, "HT-AR": true, - "HT-CE": true, "HT-GA": true, "HT-ND": true, "HT-NE": true, "HT-NO": true, "HT-NI": true, - "HT-OU": true, "HT-SD": true, "HT-SE": true, "HU-BA": true, "HU-BC": true, - "HU-BE": true, "HU-BK": true, "HU-BU": true, "HU-BZ": true, "HU-CS": true, - "HU-DE": true, "HU-DU": true, "HU-EG": true, "HU-ER": true, "HU-FE": true, - "HU-GS": true, "HU-GY": true, "HU-HB": true, "HU-HE": true, "HU-HV": true, - "HU-JN": true, "HU-KE": true, "HU-KM": true, "HU-KV": true, "HU-MI": true, - "HU-NK": true, "HU-NO": true, "HU-NY": true, "HU-PE": true, "HU-PS": true, - "HU-SD": true, "HU-SF": true, "HU-SH": true, "HU-SK": true, "HU-SN": true, - "HU-SO": true, "HU-SS": true, "HU-ST": true, "HU-SZ": true, "HU-TB": true, - "HU-TO": true, "HU-VA": true, "HU-VE": true, "HU-VM": true, "HU-ZA": true, - "HU-ZE": true, "ID-AC": true, "ID-BA": true, "ID-BB": true, "ID-BE": true, - "ID-BT": true, "ID-GO": true, "ID-IJ": true, "ID-JA": true, "ID-JB": true, - "ID-JI": true, "ID-JK": true, "ID-JT": true, "ID-JW": true, "ID-KA": true, - "ID-KB": true, "ID-KI": true, 
"ID-KU": true, "ID-KR": true, "ID-KS": true, - "ID-KT": true, "ID-LA": true, "ID-MA": true, "ID-ML": true, "ID-MU": true, - "ID-NB": true, "ID-NT": true, "ID-NU": true, "ID-PA": true, "ID-PB": true, - "ID-PE": true, "ID-PP": true, "ID-PS": true, "ID-PT": true, "ID-RI": true, - "ID-SA": true, "ID-SB": true, "ID-SG": true, "ID-SL": true, "ID-SM": true, - "ID-SN": true, "ID-SR": true, "ID-SS": true, "ID-ST": true, "ID-SU": true, - "ID-YO": true, "IE-C": true, "IE-CE": true, "IE-CN": true, "IE-CO": true, - "IE-CW": true, "IE-D": true, "IE-DL": true, "IE-G": true, "IE-KE": true, - "IE-KK": true, "IE-KY": true, "IE-L": true, "IE-LD": true, "IE-LH": true, - "IE-LK": true, "IE-LM": true, "IE-LS": true, "IE-M": true, "IE-MH": true, - "IE-MN": true, "IE-MO": true, "IE-OY": true, "IE-RN": true, "IE-SO": true, - "IE-TA": true, "IE-U": true, "IE-WD": true, "IE-WH": true, "IE-WW": true, - "IE-WX": true, "IL-D": true, "IL-HA": true, "IL-JM": true, "IL-M": true, - "IL-TA": true, "IL-Z": true, "IN-AN": true, "IN-AP": true, "IN-AR": true, - "IN-AS": true, "IN-BR": true, "IN-CH": true, "IN-CT": true, "IN-DH": true, - "IN-DL": true, "IN-DN": true, "IN-GA": true, "IN-GJ": true, "IN-HP": true, - "IN-HR": true, "IN-JH": true, "IN-JK": true, "IN-KA": true, "IN-KL": true, - "IN-LD": true, "IN-MH": true, "IN-ML": true, "IN-MN": true, "IN-MP": true, - "IN-MZ": true, "IN-NL": true, "IN-TG": true, "IN-OR": true, "IN-PB": true, "IN-PY": true, - "IN-RJ": true, "IN-SK": true, "IN-TN": true, "IN-TR": true, "IN-UP": true, - "IN-UT": true, "IN-WB": true, "IQ-AN": true, "IQ-AR": true, "IQ-BA": true, - "IQ-BB": true, "IQ-BG": true, "IQ-DA": true, "IQ-DI": true, "IQ-DQ": true, - "IQ-KA": true, "IQ-KI": true, "IQ-MA": true, "IQ-MU": true, "IQ-NA": true, "IQ-NI": true, - "IQ-QA": true, "IQ-SD": true, "IQ-SW": true, "IQ-SU": true, "IQ-TS": true, "IQ-WA": true, - "IR-00": true, "IR-01": true, "IR-02": true, "IR-03": true, "IR-04": true, "IR-05": true, - "IR-06": true, "IR-07": true, "IR-08": true, "IR-09": true, "IR-10": true, "IR-11": true, - "IR-12": true, "IR-13": true, "IR-14": true, "IR-15": true, "IR-16": true, - "IR-17": true, "IR-18": true, "IR-19": true, "IR-20": true, "IR-21": true, - "IR-22": true, "IR-23": true, "IR-24": true, "IR-25": true, "IR-26": true, - "IR-27": true, "IR-28": true, "IR-29": true, "IR-30": true, "IR-31": true, - "IS-0": true, "IS-1": true, "IS-2": true, "IS-3": true, "IS-4": true, - "IS-5": true, "IS-6": true, "IS-7": true, "IS-8": true, "IT-21": true, - "IT-23": true, "IT-25": true, "IT-32": true, "IT-34": true, "IT-36": true, - "IT-42": true, "IT-45": true, "IT-52": true, "IT-55": true, "IT-57": true, - "IT-62": true, "IT-65": true, "IT-67": true, "IT-72": true, "IT-75": true, - "IT-77": true, "IT-78": true, "IT-82": true, "IT-88": true, "IT-AG": true, - "IT-AL": true, "IT-AN": true, "IT-AO": true, "IT-AP": true, "IT-AQ": true, - "IT-AR": true, "IT-AT": true, "IT-AV": true, "IT-BA": true, "IT-BG": true, - "IT-BI": true, "IT-BL": true, "IT-BN": true, "IT-BO": true, "IT-BR": true, - "IT-BS": true, "IT-BT": true, "IT-BZ": true, "IT-CA": true, "IT-CB": true, - "IT-CE": true, "IT-CH": true, "IT-CI": true, "IT-CL": true, "IT-CN": true, - "IT-CO": true, "IT-CR": true, "IT-CS": true, "IT-CT": true, "IT-CZ": true, - "IT-EN": true, "IT-FC": true, "IT-FE": true, "IT-FG": true, "IT-FI": true, - "IT-FM": true, "IT-FR": true, "IT-GE": true, "IT-GO": true, "IT-GR": true, - "IT-IM": true, "IT-IS": true, "IT-KR": true, "IT-LC": true, "IT-LE": true, - "IT-LI": true, "IT-LO": true, "IT-LT": true, "IT-LU": true, 
"IT-MB": true, - "IT-MC": true, "IT-ME": true, "IT-MI": true, "IT-MN": true, "IT-MO": true, - "IT-MS": true, "IT-MT": true, "IT-NA": true, "IT-NO": true, "IT-NU": true, - "IT-OG": true, "IT-OR": true, "IT-OT": true, "IT-PA": true, "IT-PC": true, - "IT-PD": true, "IT-PE": true, "IT-PG": true, "IT-PI": true, "IT-PN": true, - "IT-PO": true, "IT-PR": true, "IT-PT": true, "IT-PU": true, "IT-PV": true, - "IT-PZ": true, "IT-RA": true, "IT-RC": true, "IT-RE": true, "IT-RG": true, - "IT-RI": true, "IT-RM": true, "IT-RN": true, "IT-RO": true, "IT-SA": true, - "IT-SI": true, "IT-SO": true, "IT-SP": true, "IT-SR": true, "IT-SS": true, - "IT-SV": true, "IT-TA": true, "IT-TE": true, "IT-TN": true, "IT-TO": true, - "IT-TP": true, "IT-TR": true, "IT-TS": true, "IT-TV": true, "IT-UD": true, - "IT-VA": true, "IT-VB": true, "IT-VC": true, "IT-VE": true, "IT-VI": true, - "IT-VR": true, "IT-VS": true, "IT-VT": true, "IT-VV": true, "JM-01": true, - "JM-02": true, "JM-03": true, "JM-04": true, "JM-05": true, "JM-06": true, - "JM-07": true, "JM-08": true, "JM-09": true, "JM-10": true, "JM-11": true, - "JM-12": true, "JM-13": true, "JM-14": true, "JO-AJ": true, "JO-AM": true, - "JO-AQ": true, "JO-AT": true, "JO-AZ": true, "JO-BA": true, "JO-IR": true, - "JO-JA": true, "JO-KA": true, "JO-MA": true, "JO-MD": true, "JO-MN": true, - "JP-01": true, "JP-02": true, "JP-03": true, "JP-04": true, "JP-05": true, - "JP-06": true, "JP-07": true, "JP-08": true, "JP-09": true, "JP-10": true, - "JP-11": true, "JP-12": true, "JP-13": true, "JP-14": true, "JP-15": true, - "JP-16": true, "JP-17": true, "JP-18": true, "JP-19": true, "JP-20": true, - "JP-21": true, "JP-22": true, "JP-23": true, "JP-24": true, "JP-25": true, - "JP-26": true, "JP-27": true, "JP-28": true, "JP-29": true, "JP-30": true, - "JP-31": true, "JP-32": true, "JP-33": true, "JP-34": true, "JP-35": true, - "JP-36": true, "JP-37": true, "JP-38": true, "JP-39": true, "JP-40": true, - "JP-41": true, "JP-42": true, "JP-43": true, "JP-44": true, "JP-45": true, - "JP-46": true, "JP-47": true, "KE-01": true, "KE-02": true, "KE-03": true, - "KE-04": true, "KE-05": true, "KE-06": true, "KE-07": true, "KE-08": true, - "KE-09": true, "KE-10": true, "KE-11": true, "KE-12": true, "KE-13": true, - "KE-14": true, "KE-15": true, "KE-16": true, "KE-17": true, "KE-18": true, - "KE-19": true, "KE-20": true, "KE-21": true, "KE-22": true, "KE-23": true, - "KE-24": true, "KE-25": true, "KE-26": true, "KE-27": true, "KE-28": true, - "KE-29": true, "KE-30": true, "KE-31": true, "KE-32": true, "KE-33": true, - "KE-34": true, "KE-35": true, "KE-36": true, "KE-37": true, "KE-38": true, - "KE-39": true, "KE-40": true, "KE-41": true, "KE-42": true, "KE-43": true, - "KE-44": true, "KE-45": true, "KE-46": true, "KE-47": true, "KG-B": true, - "KG-C": true, "KG-GB": true, "KG-GO": true, "KG-J": true, "KG-N": true, "KG-O": true, - "KG-T": true, "KG-Y": true, "KH-1": true, "KH-10": true, "KH-11": true, - "KH-12": true, "KH-13": true, "KH-14": true, "KH-15": true, "KH-16": true, - "KH-17": true, "KH-18": true, "KH-19": true, "KH-2": true, "KH-20": true, - "KH-21": true, "KH-22": true, "KH-23": true, "KH-24": true, "KH-3": true, - "KH-4": true, "KH-5": true, "KH-6": true, "KH-7": true, "KH-8": true, - "KH-9": true, "KI-G": true, "KI-L": true, "KI-P": true, "KM-A": true, - "KM-G": true, "KM-M": true, "KN-01": true, "KN-02": true, "KN-03": true, - "KN-04": true, "KN-05": true, "KN-06": true, "KN-07": true, "KN-08": true, - "KN-09": true, "KN-10": true, "KN-11": true, "KN-12": true, "KN-13": true, - 
"KN-15": true, "KN-K": true, "KN-N": true, "KP-01": true, "KP-02": true, - "KP-03": true, "KP-04": true, "KP-05": true, "KP-06": true, "KP-07": true, - "KP-08": true, "KP-09": true, "KP-10": true, "KP-13": true, "KR-11": true, - "KR-26": true, "KR-27": true, "KR-28": true, "KR-29": true, "KR-30": true, - "KR-31": true, "KR-41": true, "KR-42": true, "KR-43": true, "KR-44": true, - "KR-45": true, "KR-46": true, "KR-47": true, "KR-48": true, "KR-49": true, - "KW-AH": true, "KW-FA": true, "KW-HA": true, "KW-JA": true, "KW-KU": true, - "KW-MU": true, "KZ-10": true, "KZ-75": true, "KZ-19": true, "KZ-11": true, - "KZ-15": true, "KZ-71": true, "KZ-23": true, "KZ-27": true, "KZ-47": true, - "KZ-55": true, "KZ-35": true, "KZ-39": true, "KZ-43": true, "KZ-63": true, - "KZ-79": true, "KZ-59": true, "KZ-61": true, "KZ-62": true, "KZ-31": true, - "KZ-33": true, "LA-AT": true, "LA-BK": true, "LA-BL": true, - "LA-CH": true, "LA-HO": true, "LA-KH": true, "LA-LM": true, "LA-LP": true, - "LA-OU": true, "LA-PH": true, "LA-SL": true, "LA-SV": true, "LA-VI": true, - "LA-VT": true, "LA-XA": true, "LA-XE": true, "LA-XI": true, "LA-XS": true, - "LB-AK": true, "LB-AS": true, "LB-BA": true, "LB-BH": true, "LB-BI": true, - "LB-JA": true, "LB-JL": true, "LB-NA": true, "LC-01": true, "LC-02": true, - "LC-03": true, "LC-05": true, "LC-06": true, "LC-07": true, "LC-08": true, - "LC-10": true, "LC-11": true, "LI-01": true, "LI-02": true, - "LI-03": true, "LI-04": true, "LI-05": true, "LI-06": true, "LI-07": true, - "LI-08": true, "LI-09": true, "LI-10": true, "LI-11": true, "LK-1": true, - "LK-11": true, "LK-12": true, "LK-13": true, "LK-2": true, "LK-21": true, - "LK-22": true, "LK-23": true, "LK-3": true, "LK-31": true, "LK-32": true, - "LK-33": true, "LK-4": true, "LK-41": true, "LK-42": true, "LK-43": true, - "LK-44": true, "LK-45": true, "LK-5": true, "LK-51": true, "LK-52": true, - "LK-53": true, "LK-6": true, "LK-61": true, "LK-62": true, "LK-7": true, - "LK-71": true, "LK-72": true, "LK-8": true, "LK-81": true, "LK-82": true, - "LK-9": true, "LK-91": true, "LK-92": true, "LR-BG": true, "LR-BM": true, - "LR-CM": true, "LR-GB": true, "LR-GG": true, "LR-GK": true, "LR-LO": true, - "LR-MG": true, "LR-MO": true, "LR-MY": true, "LR-NI": true, "LR-RI": true, - "LR-SI": true, "LS-A": true, "LS-B": true, "LS-C": true, "LS-D": true, - "LS-E": true, "LS-F": true, "LS-G": true, "LS-H": true, "LS-J": true, - "LS-K": true, "LT-AL": true, "LT-KL": true, "LT-KU": true, "LT-MR": true, - "LT-PN": true, "LT-SA": true, "LT-TA": true, "LT-TE": true, "LT-UT": true, - "LT-VL": true, "LU-CA": true, "LU-CL": true, "LU-DI": true, "LU-EC": true, - "LU-ES": true, "LU-GR": true, "LU-LU": true, "LU-ME": true, "LU-RD": true, - "LU-RM": true, "LU-VD": true, "LU-WI": true, "LU-D": true, "LU-G": true, "LU-L": true, - "LV-001": true, "LV-111": true, "LV-112": true, "LV-113": true, - "LV-002": true, "LV-003": true, "LV-004": true, "LV-005": true, "LV-006": true, - "LV-007": true, "LV-008": true, "LV-009": true, "LV-010": true, "LV-011": true, - "LV-012": true, "LV-013": true, "LV-014": true, "LV-015": true, "LV-016": true, - "LV-017": true, "LV-018": true, "LV-019": true, "LV-020": true, "LV-021": true, - "LV-022": true, "LV-023": true, "LV-024": true, "LV-025": true, "LV-026": true, - "LV-027": true, "LV-028": true, "LV-029": true, "LV-030": true, "LV-031": true, - "LV-032": true, "LV-033": true, "LV-034": true, "LV-035": true, "LV-036": true, - "LV-037": true, "LV-038": true, "LV-039": true, "LV-040": true, "LV-041": true, - "LV-042": true, 
"LV-043": true, "LV-044": true, "LV-045": true, "LV-046": true, - "LV-047": true, "LV-048": true, "LV-049": true, "LV-050": true, "LV-051": true, - "LV-052": true, "LV-053": true, "LV-054": true, "LV-055": true, "LV-056": true, - "LV-057": true, "LV-058": true, "LV-059": true, "LV-060": true, "LV-061": true, - "LV-062": true, "LV-063": true, "LV-064": true, "LV-065": true, "LV-066": true, - "LV-067": true, "LV-068": true, "LV-069": true, "LV-070": true, "LV-071": true, - "LV-072": true, "LV-073": true, "LV-074": true, "LV-075": true, "LV-076": true, - "LV-077": true, "LV-078": true, "LV-079": true, "LV-080": true, "LV-081": true, - "LV-082": true, "LV-083": true, "LV-084": true, "LV-085": true, "LV-086": true, - "LV-087": true, "LV-088": true, "LV-089": true, "LV-090": true, "LV-091": true, - "LV-092": true, "LV-093": true, "LV-094": true, "LV-095": true, "LV-096": true, - "LV-097": true, "LV-098": true, "LV-099": true, "LV-100": true, "LV-101": true, - "LV-102": true, "LV-103": true, "LV-104": true, "LV-105": true, "LV-106": true, - "LV-107": true, "LV-108": true, "LV-109": true, "LV-110": true, "LV-DGV": true, - "LV-JEL": true, "LV-JKB": true, "LV-JUR": true, "LV-LPX": true, "LV-REZ": true, - "LV-RIX": true, "LV-VEN": true, "LV-VMR": true, "LY-BA": true, "LY-BU": true, - "LY-DR": true, "LY-GT": true, "LY-JA": true, "LY-JB": true, "LY-JG": true, - "LY-JI": true, "LY-JU": true, "LY-KF": true, "LY-MB": true, "LY-MI": true, - "LY-MJ": true, "LY-MQ": true, "LY-NL": true, "LY-NQ": true, "LY-SB": true, - "LY-SR": true, "LY-TB": true, "LY-WA": true, "LY-WD": true, "LY-WS": true, - "LY-ZA": true, "MA-01": true, "MA-02": true, "MA-03": true, "MA-04": true, - "MA-05": true, "MA-06": true, "MA-07": true, "MA-08": true, "MA-09": true, - "MA-10": true, "MA-11": true, "MA-12": true, "MA-13": true, "MA-14": true, - "MA-15": true, "MA-16": true, "MA-AGD": true, "MA-AOU": true, "MA-ASZ": true, - "MA-AZI": true, "MA-BEM": true, "MA-BER": true, "MA-BES": true, "MA-BOD": true, - "MA-BOM": true, "MA-CAS": true, "MA-CHE": true, "MA-CHI": true, "MA-CHT": true, - "MA-ERR": true, "MA-ESI": true, "MA-ESM": true, "MA-FAH": true, "MA-FES": true, - "MA-FIG": true, "MA-GUE": true, "MA-HAJ": true, "MA-HAO": true, "MA-HOC": true, - "MA-IFR": true, "MA-INE": true, "MA-JDI": true, "MA-JRA": true, "MA-KEN": true, - "MA-KES": true, "MA-KHE": true, "MA-KHN": true, "MA-KHO": true, "MA-LAA": true, - "MA-LAR": true, "MA-MED": true, "MA-MEK": true, "MA-MMD": true, "MA-MMN": true, - "MA-MOH": true, "MA-MOU": true, "MA-NAD": true, "MA-NOU": true, "MA-OUA": true, - "MA-OUD": true, "MA-OUJ": true, "MA-RAB": true, "MA-SAF": true, "MA-SAL": true, - "MA-SEF": true, "MA-SET": true, "MA-SIK": true, "MA-SKH": true, "MA-SYB": true, - "MA-TAI": true, "MA-TAO": true, "MA-TAR": true, "MA-TAT": true, "MA-TAZ": true, - "MA-TET": true, "MA-TIZ": true, "MA-TNG": true, "MA-TNT": true, "MA-ZAG": true, - "MC-CL": true, "MC-CO": true, "MC-FO": true, "MC-GA": true, "MC-JE": true, - "MC-LA": true, "MC-MA": true, "MC-MC": true, "MC-MG": true, "MC-MO": true, - "MC-MU": true, "MC-PH": true, "MC-SD": true, "MC-SO": true, "MC-SP": true, - "MC-SR": true, "MC-VR": true, "MD-AN": true, "MD-BA": true, "MD-BD": true, - "MD-BR": true, "MD-BS": true, "MD-CA": true, "MD-CL": true, "MD-CM": true, - "MD-CR": true, "MD-CS": true, "MD-CT": true, "MD-CU": true, "MD-DO": true, - "MD-DR": true, "MD-DU": true, "MD-ED": true, "MD-FA": true, "MD-FL": true, - "MD-GA": true, "MD-GL": true, "MD-HI": true, "MD-IA": true, "MD-LE": true, - "MD-NI": true, "MD-OC": true, "MD-OR": 
true, "MD-RE": true, "MD-RI": true, - "MD-SD": true, "MD-SI": true, "MD-SN": true, "MD-SO": true, "MD-ST": true, - "MD-SV": true, "MD-TA": true, "MD-TE": true, "MD-UN": true, "ME-01": true, - "ME-02": true, "ME-03": true, "ME-04": true, "ME-05": true, "ME-06": true, - "ME-07": true, "ME-08": true, "ME-09": true, "ME-10": true, "ME-11": true, - "ME-12": true, "ME-13": true, "ME-14": true, "ME-15": true, "ME-16": true, - "ME-17": true, "ME-18": true, "ME-19": true, "ME-20": true, "ME-21": true, "ME-24": true, - "MG-A": true, "MG-D": true, "MG-F": true, "MG-M": true, "MG-T": true, - "MG-U": true, "MH-ALK": true, "MH-ALL": true, "MH-ARN": true, "MH-AUR": true, - "MH-EBO": true, "MH-ENI": true, "MH-JAB": true, "MH-JAL": true, "MH-KIL": true, - "MH-KWA": true, "MH-L": true, "MH-LAE": true, "MH-LIB": true, "MH-LIK": true, - "MH-MAJ": true, "MH-MAL": true, "MH-MEJ": true, "MH-MIL": true, "MH-NMK": true, - "MH-NMU": true, "MH-RON": true, "MH-T": true, "MH-UJA": true, "MH-UTI": true, - "MH-WTJ": true, "MH-WTN": true, "MK-101": true, "MK-102": true, "MK-103": true, - "MK-104": true, "MK-105": true, - "MK-106": true, "MK-107": true, "MK-108": true, "MK-109": true, "MK-201": true, - "MK-202": true, "MK-205": true, "MK-206": true, "MK-207": true, "MK-208": true, - "MK-209": true, "MK-210": true, "MK-211": true, "MK-301": true, "MK-303": true, - "MK-307": true, "MK-308": true, "MK-310": true, "MK-311": true, "MK-312": true, - "MK-401": true, "MK-402": true, "MK-403": true, "MK-404": true, "MK-405": true, - "MK-406": true, "MK-408": true, "MK-409": true, "MK-410": true, "MK-501": true, - "MK-502": true, "MK-503": true, "MK-505": true, "MK-506": true, "MK-507": true, - "MK-508": true, "MK-509": true, "MK-601": true, "MK-602": true, "MK-604": true, - "MK-605": true, "MK-606": true, "MK-607": true, "MK-608": true, "MK-609": true, - "MK-701": true, "MK-702": true, "MK-703": true, "MK-704": true, "MK-705": true, - "MK-803": true, "MK-804": true, "MK-806": true, "MK-807": true, "MK-809": true, - "MK-810": true, "MK-811": true, "MK-812": true, "MK-813": true, "MK-814": true, - "MK-816": true, "ML-1": true, "ML-2": true, "ML-3": true, "ML-4": true, - "ML-5": true, "ML-6": true, "ML-7": true, "ML-8": true, "ML-BKO": true, - "MM-01": true, "MM-02": true, "MM-03": true, "MM-04": true, "MM-05": true, - "MM-06": true, "MM-07": true, "MM-11": true, "MM-12": true, "MM-13": true, - "MM-14": true, "MM-15": true, "MM-16": true, "MM-17": true, "MM-18": true, "MN-035": true, - "MN-037": true, "MN-039": true, "MN-041": true, "MN-043": true, "MN-046": true, - "MN-047": true, "MN-049": true, "MN-051": true, "MN-053": true, "MN-055": true, - "MN-057": true, "MN-059": true, "MN-061": true, "MN-063": true, "MN-064": true, - "MN-065": true, "MN-067": true, "MN-069": true, "MN-071": true, "MN-073": true, - "MN-1": true, "MR-01": true, "MR-02": true, "MR-03": true, "MR-04": true, - "MR-05": true, "MR-06": true, "MR-07": true, "MR-08": true, "MR-09": true, - "MR-10": true, "MR-11": true, "MR-12": true, "MR-13": true, "MR-NKC": true, "MT-01": true, - "MT-02": true, "MT-03": true, "MT-04": true, "MT-05": true, "MT-06": true, - "MT-07": true, "MT-08": true, "MT-09": true, "MT-10": true, "MT-11": true, - "MT-12": true, "MT-13": true, "MT-14": true, "MT-15": true, "MT-16": true, - "MT-17": true, "MT-18": true, "MT-19": true, "MT-20": true, "MT-21": true, - "MT-22": true, "MT-23": true, "MT-24": true, "MT-25": true, "MT-26": true, - "MT-27": true, "MT-28": true, "MT-29": true, "MT-30": true, "MT-31": true, - "MT-32": true, "MT-33": true, 
"MT-34": true, "MT-35": true, "MT-36": true, - "MT-37": true, "MT-38": true, "MT-39": true, "MT-40": true, "MT-41": true, - "MT-42": true, "MT-43": true, "MT-44": true, "MT-45": true, "MT-46": true, - "MT-47": true, "MT-48": true, "MT-49": true, "MT-50": true, "MT-51": true, - "MT-52": true, "MT-53": true, "MT-54": true, "MT-55": true, "MT-56": true, - "MT-57": true, "MT-58": true, "MT-59": true, "MT-60": true, "MT-61": true, - "MT-62": true, "MT-63": true, "MT-64": true, "MT-65": true, "MT-66": true, - "MT-67": true, "MT-68": true, "MU-AG": true, "MU-BL": true, "MU-BR": true, - "MU-CC": true, "MU-CU": true, "MU-FL": true, "MU-GP": true, "MU-MO": true, - "MU-PA": true, "MU-PL": true, "MU-PU": true, "MU-PW": true, "MU-QB": true, - "MU-RO": true, "MU-RP": true, "MU-RR": true, "MU-SA": true, "MU-VP": true, "MV-00": true, - "MV-01": true, "MV-02": true, "MV-03": true, "MV-04": true, "MV-05": true, - "MV-07": true, "MV-08": true, "MV-12": true, "MV-13": true, "MV-14": true, - "MV-17": true, "MV-20": true, "MV-23": true, "MV-24": true, "MV-25": true, - "MV-26": true, "MV-27": true, "MV-28": true, "MV-29": true, "MV-CE": true, - "MV-MLE": true, "MV-NC": true, "MV-NO": true, "MV-SC": true, "MV-SU": true, - "MV-UN": true, "MV-US": true, "MW-BA": true, "MW-BL": true, "MW-C": true, - "MW-CK": true, "MW-CR": true, "MW-CT": true, "MW-DE": true, "MW-DO": true, - "MW-KR": true, "MW-KS": true, "MW-LI": true, "MW-LK": true, "MW-MC": true, - "MW-MG": true, "MW-MH": true, "MW-MU": true, "MW-MW": true, "MW-MZ": true, - "MW-N": true, "MW-NB": true, "MW-NE": true, "MW-NI": true, "MW-NK": true, - "MW-NS": true, "MW-NU": true, "MW-PH": true, "MW-RU": true, "MW-S": true, - "MW-SA": true, "MW-TH": true, "MW-ZO": true, "MX-AGU": true, "MX-BCN": true, - "MX-BCS": true, "MX-CAM": true, "MX-CHH": true, "MX-CHP": true, "MX-COA": true, - "MX-COL": true, "MX-CMX": true, "MX-DIF": true, "MX-DUR": true, "MX-GRO": true, "MX-GUA": true, - "MX-HID": true, "MX-JAL": true, "MX-MEX": true, "MX-MIC": true, "MX-MOR": true, - "MX-NAY": true, "MX-NLE": true, "MX-OAX": true, "MX-PUE": true, "MX-QUE": true, - "MX-ROO": true, "MX-SIN": true, "MX-SLP": true, "MX-SON": true, "MX-TAB": true, - "MX-TAM": true, "MX-TLA": true, "MX-VER": true, "MX-YUC": true, "MX-ZAC": true, - "MY-01": true, "MY-02": true, "MY-03": true, "MY-04": true, "MY-05": true, - "MY-06": true, "MY-07": true, "MY-08": true, "MY-09": true, "MY-10": true, - "MY-11": true, "MY-12": true, "MY-13": true, "MY-14": true, "MY-15": true, - "MY-16": true, "MZ-A": true, "MZ-B": true, "MZ-G": true, "MZ-I": true, - "MZ-L": true, "MZ-MPM": true, "MZ-N": true, "MZ-P": true, "MZ-Q": true, - "MZ-S": true, "MZ-T": true, "NA-CA": true, "NA-ER": true, "NA-HA": true, - "NA-KA": true, "NA-KE": true, "NA-KH": true, "NA-KU": true, "NA-KW": true, "NA-OD": true, "NA-OH": true, - "NA-OK": true, "NA-ON": true, "NA-OS": true, "NA-OT": true, "NA-OW": true, - "NE-1": true, "NE-2": true, "NE-3": true, "NE-4": true, "NE-5": true, - "NE-6": true, "NE-7": true, "NE-8": true, "NG-AB": true, "NG-AD": true, - "NG-AK": true, "NG-AN": true, "NG-BA": true, "NG-BE": true, "NG-BO": true, - "NG-BY": true, "NG-CR": true, "NG-DE": true, "NG-EB": true, "NG-ED": true, - "NG-EK": true, "NG-EN": true, "NG-FC": true, "NG-GO": true, "NG-IM": true, - "NG-JI": true, "NG-KD": true, "NG-KE": true, "NG-KN": true, "NG-KO": true, - "NG-KT": true, "NG-KW": true, "NG-LA": true, "NG-NA": true, "NG-NI": true, - "NG-OG": true, "NG-ON": true, "NG-OS": true, "NG-OY": true, "NG-PL": true, - "NG-RI": true, "NG-SO": true, "NG-TA": true, 
"NG-YO": true, "NG-ZA": true, - "NI-AN": true, "NI-AS": true, "NI-BO": true, "NI-CA": true, "NI-CI": true, - "NI-CO": true, "NI-ES": true, "NI-GR": true, "NI-JI": true, "NI-LE": true, - "NI-MD": true, "NI-MN": true, "NI-MS": true, "NI-MT": true, "NI-NS": true, - "NI-RI": true, "NI-SJ": true, "NL-AW": true, "NL-BQ1": true, "NL-BQ2": true, - "NL-BQ3": true, "NL-CW": true, "NL-DR": true, "NL-FL": true, "NL-FR": true, - "NL-GE": true, "NL-GR": true, "NL-LI": true, "NL-NB": true, "NL-NH": true, - "NL-OV": true, "NL-SX": true, "NL-UT": true, "NL-ZE": true, "NL-ZH": true, - "NO-03": true, "NO-11": true, "NO-15": true, "NO-16": true, "NO-17": true, - "NO-18": true, "NO-21": true, "NO-30": true, "NO-34": true, "NO-38": true, - "NO-42": true, "NO-46": true, "NO-50": true, "NO-54": true, - "NO-22": true, "NP-1": true, "NP-2": true, "NP-3": true, "NP-4": true, - "NP-5": true, "NP-BA": true, "NP-BH": true, "NP-DH": true, "NP-GA": true, - "NP-JA": true, "NP-KA": true, "NP-KO": true, "NP-LU": true, "NP-MA": true, - "NP-ME": true, "NP-NA": true, "NP-RA": true, "NP-SA": true, "NP-SE": true, - "NR-01": true, "NR-02": true, "NR-03": true, "NR-04": true, "NR-05": true, - "NR-06": true, "NR-07": true, "NR-08": true, "NR-09": true, "NR-10": true, - "NR-11": true, "NR-12": true, "NR-13": true, "NR-14": true, "NZ-AUK": true, - "NZ-BOP": true, "NZ-CAN": true, "NZ-CIT": true, "NZ-GIS": true, "NZ-HKB": true, - "NZ-MBH": true, "NZ-MWT": true, "NZ-N": true, "NZ-NSN": true, "NZ-NTL": true, - "NZ-OTA": true, "NZ-S": true, "NZ-STL": true, "NZ-TAS": true, "NZ-TKI": true, - "NZ-WGN": true, "NZ-WKO": true, "NZ-WTC": true, "OM-BA": true, "OM-BS": true, "OM-BU": true, "OM-BJ": true, - "OM-DA": true, "OM-MA": true, "OM-MU": true, "OM-SH": true, "OM-SJ": true, "OM-SS": true, "OM-WU": true, - "OM-ZA": true, "OM-ZU": true, "PA-1": true, "PA-2": true, "PA-3": true, - "PA-4": true, "PA-5": true, "PA-6": true, "PA-7": true, "PA-8": true, - "PA-9": true, "PA-EM": true, "PA-KY": true, "PA-NB": true, "PE-AMA": true, - "PE-ANC": true, "PE-APU": true, "PE-ARE": true, "PE-AYA": true, "PE-CAJ": true, - "PE-CAL": true, "PE-CUS": true, "PE-HUC": true, "PE-HUV": true, "PE-ICA": true, - "PE-JUN": true, "PE-LAL": true, "PE-LAM": true, "PE-LIM": true, "PE-LMA": true, - "PE-LOR": true, "PE-MDD": true, "PE-MOQ": true, "PE-PAS": true, "PE-PIU": true, - "PE-PUN": true, "PE-SAM": true, "PE-TAC": true, "PE-TUM": true, "PE-UCA": true, - "PG-CPK": true, "PG-CPM": true, "PG-EBR": true, "PG-EHG": true, "PG-EPW": true, - "PG-ESW": true, "PG-GPK": true, "PG-MBA": true, "PG-MPL": true, "PG-MPM": true, - "PG-MRL": true, "PG-NCD": true, "PG-NIK": true, "PG-NPP": true, "PG-NSB": true, - "PG-SAN": true, "PG-SHM": true, "PG-WBK": true, "PG-WHM": true, "PG-WPD": true, - "PH-00": true, "PH-01": true, "PH-02": true, "PH-03": true, "PH-05": true, - "PH-06": true, "PH-07": true, "PH-08": true, "PH-09": true, "PH-10": true, - "PH-11": true, "PH-12": true, "PH-13": true, "PH-14": true, "PH-15": true, - "PH-40": true, "PH-41": true, "PH-ABR": true, "PH-AGN": true, "PH-AGS": true, - "PH-AKL": true, "PH-ALB": true, "PH-ANT": true, "PH-APA": true, "PH-AUR": true, - "PH-BAN": true, "PH-BAS": true, "PH-BEN": true, "PH-BIL": true, "PH-BOH": true, - "PH-BTG": true, "PH-BTN": true, "PH-BUK": true, "PH-BUL": true, "PH-CAG": true, - "PH-CAM": true, "PH-CAN": true, "PH-CAP": true, "PH-CAS": true, "PH-CAT": true, - "PH-CAV": true, "PH-CEB": true, "PH-COM": true, "PH-DAO": true, "PH-DAS": true, - "PH-DAV": true, "PH-DIN": true, "PH-EAS": true, "PH-GUI": true, "PH-IFU": true, - 
"PH-ILI": true, "PH-ILN": true, "PH-ILS": true, "PH-ISA": true, "PH-KAL": true, - "PH-LAG": true, "PH-LAN": true, "PH-LAS": true, "PH-LEY": true, "PH-LUN": true, - "PH-MAD": true, "PH-MAG": true, "PH-MAS": true, "PH-MDC": true, "PH-MDR": true, - "PH-MOU": true, "PH-MSC": true, "PH-MSR": true, "PH-NCO": true, "PH-NEC": true, - "PH-NER": true, "PH-NSA": true, "PH-NUE": true, "PH-NUV": true, "PH-PAM": true, - "PH-PAN": true, "PH-PLW": true, "PH-QUE": true, "PH-QUI": true, "PH-RIZ": true, - "PH-ROM": true, "PH-SAR": true, "PH-SCO": true, "PH-SIG": true, "PH-SLE": true, - "PH-SLU": true, "PH-SOR": true, "PH-SUK": true, "PH-SUN": true, "PH-SUR": true, - "PH-TAR": true, "PH-TAW": true, "PH-WSA": true, "PH-ZAN": true, "PH-ZAS": true, - "PH-ZMB": true, "PH-ZSI": true, "PK-BA": true, "PK-GB": true, "PK-IS": true, - "PK-JK": true, "PK-KP": true, "PK-PB": true, "PK-SD": true, "PK-TA": true, - "PL-02": true, "PL-04": true, "PL-06": true, "PL-08": true, "PL-10": true, - "PL-12": true, "PL-14": true, "PL-16": true, "PL-18": true, "PL-20": true, - "PL-22": true, "PL-24": true, "PL-26": true, "PL-28": true, "PL-30": true, "PL-32": true, - "PS-BTH": true, "PS-DEB": true, "PS-GZA": true, "PS-HBN": true, - "PS-JEM": true, "PS-JEN": true, "PS-JRH": true, "PS-KYS": true, "PS-NBS": true, - "PS-NGZ": true, "PS-QQA": true, "PS-RBH": true, "PS-RFH": true, "PS-SLT": true, - "PS-TBS": true, "PS-TKM": true, "PT-01": true, "PT-02": true, "PT-03": true, - "PT-04": true, "PT-05": true, "PT-06": true, "PT-07": true, "PT-08": true, - "PT-09": true, "PT-10": true, "PT-11": true, "PT-12": true, "PT-13": true, - "PT-14": true, "PT-15": true, "PT-16": true, "PT-17": true, "PT-18": true, - "PT-20": true, "PT-30": true, "PW-002": true, "PW-004": true, "PW-010": true, - "PW-050": true, "PW-100": true, "PW-150": true, "PW-212": true, "PW-214": true, - "PW-218": true, "PW-222": true, "PW-224": true, "PW-226": true, "PW-227": true, - "PW-228": true, "PW-350": true, "PW-370": true, "PY-1": true, "PY-10": true, - "PY-11": true, "PY-12": true, "PY-13": true, "PY-14": true, "PY-15": true, - "PY-16": true, "PY-19": true, "PY-2": true, "PY-3": true, "PY-4": true, - "PY-5": true, "PY-6": true, "PY-7": true, "PY-8": true, "PY-9": true, - "PY-ASU": true, "QA-DA": true, "QA-KH": true, "QA-MS": true, "QA-RA": true, - "QA-US": true, "QA-WA": true, "QA-ZA": true, "RO-AB": true, "RO-AG": true, - "RO-AR": true, "RO-B": true, "RO-BC": true, "RO-BH": true, "RO-BN": true, - "RO-BR": true, "RO-BT": true, "RO-BV": true, "RO-BZ": true, "RO-CJ": true, - "RO-CL": true, "RO-CS": true, "RO-CT": true, "RO-CV": true, "RO-DB": true, - "RO-DJ": true, "RO-GJ": true, "RO-GL": true, "RO-GR": true, "RO-HD": true, - "RO-HR": true, "RO-IF": true, "RO-IL": true, "RO-IS": true, "RO-MH": true, - "RO-MM": true, "RO-MS": true, "RO-NT": true, "RO-OT": true, "RO-PH": true, - "RO-SB": true, "RO-SJ": true, "RO-SM": true, "RO-SV": true, "RO-TL": true, - "RO-TM": true, "RO-TR": true, "RO-VL": true, "RO-VN": true, "RO-VS": true, - "RS-00": true, "RS-01": true, "RS-02": true, "RS-03": true, "RS-04": true, - "RS-05": true, "RS-06": true, "RS-07": true, "RS-08": true, "RS-09": true, - "RS-10": true, "RS-11": true, "RS-12": true, "RS-13": true, "RS-14": true, - "RS-15": true, "RS-16": true, "RS-17": true, "RS-18": true, "RS-19": true, - "RS-20": true, "RS-21": true, "RS-22": true, "RS-23": true, "RS-24": true, - "RS-25": true, "RS-26": true, "RS-27": true, "RS-28": true, "RS-29": true, - "RS-KM": true, "RS-VO": true, "RU-AD": true, "RU-AL": true, "RU-ALT": true, - "RU-AMU": true, 
"RU-ARK": true, "RU-AST": true, "RU-BA": true, "RU-BEL": true, - "RU-BRY": true, "RU-BU": true, "RU-CE": true, "RU-CHE": true, "RU-CHU": true, - "RU-CU": true, "RU-DA": true, "RU-IN": true, "RU-IRK": true, "RU-IVA": true, - "RU-KAM": true, "RU-KB": true, "RU-KC": true, "RU-KDA": true, "RU-KEM": true, - "RU-KGD": true, "RU-KGN": true, "RU-KHA": true, "RU-KHM": true, "RU-KIR": true, - "RU-KK": true, "RU-KL": true, "RU-KLU": true, "RU-KO": true, "RU-KOS": true, - "RU-KR": true, "RU-KRS": true, "RU-KYA": true, "RU-LEN": true, "RU-LIP": true, - "RU-MAG": true, "RU-ME": true, "RU-MO": true, "RU-MOS": true, "RU-MOW": true, - "RU-MUR": true, "RU-NEN": true, "RU-NGR": true, "RU-NIZ": true, "RU-NVS": true, - "RU-OMS": true, "RU-ORE": true, "RU-ORL": true, "RU-PER": true, "RU-PNZ": true, - "RU-PRI": true, "RU-PSK": true, "RU-ROS": true, "RU-RYA": true, "RU-SA": true, - "RU-SAK": true, "RU-SAM": true, "RU-SAR": true, "RU-SE": true, "RU-SMO": true, - "RU-SPE": true, "RU-STA": true, "RU-SVE": true, "RU-TA": true, "RU-TAM": true, - "RU-TOM": true, "RU-TUL": true, "RU-TVE": true, "RU-TY": true, "RU-TYU": true, - "RU-UD": true, "RU-ULY": true, "RU-VGG": true, "RU-VLA": true, "RU-VLG": true, - "RU-VOR": true, "RU-YAN": true, "RU-YAR": true, "RU-YEV": true, "RU-ZAB": true, - "RW-01": true, "RW-02": true, "RW-03": true, "RW-04": true, "RW-05": true, - "SA-01": true, "SA-02": true, "SA-03": true, "SA-04": true, "SA-05": true, - "SA-06": true, "SA-07": true, "SA-08": true, "SA-09": true, "SA-10": true, - "SA-11": true, "SA-12": true, "SA-14": true, "SB-CE": true, "SB-CH": true, - "SB-CT": true, "SB-GU": true, "SB-IS": true, "SB-MK": true, "SB-ML": true, - "SB-RB": true, "SB-TE": true, "SB-WE": true, "SC-01": true, "SC-02": true, - "SC-03": true, "SC-04": true, "SC-05": true, "SC-06": true, "SC-07": true, - "SC-08": true, "SC-09": true, "SC-10": true, "SC-11": true, "SC-12": true, - "SC-13": true, "SC-14": true, "SC-15": true, "SC-16": true, "SC-17": true, - "SC-18": true, "SC-19": true, "SC-20": true, "SC-21": true, "SC-22": true, - "SC-23": true, "SC-24": true, "SC-25": true, "SD-DC": true, "SD-DE": true, - "SD-DN": true, "SD-DS": true, "SD-DW": true, "SD-GD": true, "SD-GK": true, "SD-GZ": true, - "SD-KA": true, "SD-KH": true, "SD-KN": true, "SD-KS": true, "SD-NB": true, - "SD-NO": true, "SD-NR": true, "SD-NW": true, "SD-RS": true, "SD-SI": true, - "SE-AB": true, "SE-AC": true, "SE-BD": true, "SE-C": true, "SE-D": true, - "SE-E": true, "SE-F": true, "SE-G": true, "SE-H": true, "SE-I": true, - "SE-K": true, "SE-M": true, "SE-N": true, "SE-O": true, "SE-S": true, - "SE-T": true, "SE-U": true, "SE-W": true, "SE-X": true, "SE-Y": true, - "SE-Z": true, "SG-01": true, "SG-02": true, "SG-03": true, "SG-04": true, - "SG-05": true, "SH-AC": true, "SH-HL": true, "SH-TA": true, "SI-001": true, - "SI-002": true, "SI-003": true, "SI-004": true, "SI-005": true, "SI-006": true, - "SI-007": true, "SI-008": true, "SI-009": true, "SI-010": true, "SI-011": true, - "SI-012": true, "SI-013": true, "SI-014": true, "SI-015": true, "SI-016": true, - "SI-017": true, "SI-018": true, "SI-019": true, "SI-020": true, "SI-021": true, - "SI-022": true, "SI-023": true, "SI-024": true, "SI-025": true, "SI-026": true, - "SI-027": true, "SI-028": true, "SI-029": true, "SI-030": true, "SI-031": true, - "SI-032": true, "SI-033": true, "SI-034": true, "SI-035": true, "SI-036": true, - "SI-037": true, "SI-038": true, "SI-039": true, "SI-040": true, "SI-041": true, - "SI-042": true, "SI-043": true, "SI-044": true, "SI-045": true, "SI-046": true, - 
"SI-047": true, "SI-048": true, "SI-049": true, "SI-050": true, "SI-051": true, - "SI-052": true, "SI-053": true, "SI-054": true, "SI-055": true, "SI-056": true, - "SI-057": true, "SI-058": true, "SI-059": true, "SI-060": true, "SI-061": true, - "SI-062": true, "SI-063": true, "SI-064": true, "SI-065": true, "SI-066": true, - "SI-067": true, "SI-068": true, "SI-069": true, "SI-070": true, "SI-071": true, - "SI-072": true, "SI-073": true, "SI-074": true, "SI-075": true, "SI-076": true, - "SI-077": true, "SI-078": true, "SI-079": true, "SI-080": true, "SI-081": true, - "SI-082": true, "SI-083": true, "SI-084": true, "SI-085": true, "SI-086": true, - "SI-087": true, "SI-088": true, "SI-089": true, "SI-090": true, "SI-091": true, - "SI-092": true, "SI-093": true, "SI-094": true, "SI-095": true, "SI-096": true, - "SI-097": true, "SI-098": true, "SI-099": true, "SI-100": true, "SI-101": true, - "SI-102": true, "SI-103": true, "SI-104": true, "SI-105": true, "SI-106": true, - "SI-107": true, "SI-108": true, "SI-109": true, "SI-110": true, "SI-111": true, - "SI-112": true, "SI-113": true, "SI-114": true, "SI-115": true, "SI-116": true, - "SI-117": true, "SI-118": true, "SI-119": true, "SI-120": true, "SI-121": true, - "SI-122": true, "SI-123": true, "SI-124": true, "SI-125": true, "SI-126": true, - "SI-127": true, "SI-128": true, "SI-129": true, "SI-130": true, "SI-131": true, - "SI-132": true, "SI-133": true, "SI-134": true, "SI-135": true, "SI-136": true, - "SI-137": true, "SI-138": true, "SI-139": true, "SI-140": true, "SI-141": true, - "SI-142": true, "SI-143": true, "SI-144": true, "SI-146": true, "SI-147": true, - "SI-148": true, "SI-149": true, "SI-150": true, "SI-151": true, "SI-152": true, - "SI-153": true, "SI-154": true, "SI-155": true, "SI-156": true, "SI-157": true, - "SI-158": true, "SI-159": true, "SI-160": true, "SI-161": true, "SI-162": true, - "SI-163": true, "SI-164": true, "SI-165": true, "SI-166": true, "SI-167": true, - "SI-168": true, "SI-169": true, "SI-170": true, "SI-171": true, "SI-172": true, - "SI-173": true, "SI-174": true, "SI-175": true, "SI-176": true, "SI-177": true, - "SI-178": true, "SI-179": true, "SI-180": true, "SI-181": true, "SI-182": true, - "SI-183": true, "SI-184": true, "SI-185": true, "SI-186": true, "SI-187": true, - "SI-188": true, "SI-189": true, "SI-190": true, "SI-191": true, "SI-192": true, - "SI-193": true, "SI-194": true, "SI-195": true, "SI-196": true, "SI-197": true, - "SI-198": true, "SI-199": true, "SI-200": true, "SI-201": true, "SI-202": true, - "SI-203": true, "SI-204": true, "SI-205": true, "SI-206": true, "SI-207": true, - "SI-208": true, "SI-209": true, "SI-210": true, "SI-211": true, "SI-212": true, "SI-213": true, "SK-BC": true, - "SK-BL": true, "SK-KI": true, "SK-NI": true, "SK-PV": true, "SK-TA": true, - "SK-TC": true, "SK-ZI": true, "SL-E": true, "SL-N": true, "SL-S": true, - "SL-W": true, "SM-01": true, "SM-02": true, "SM-03": true, "SM-04": true, - "SM-05": true, "SM-06": true, "SM-07": true, "SM-08": true, "SM-09": true, - "SN-DB": true, "SN-DK": true, "SN-FK": true, "SN-KA": true, "SN-KD": true, - "SN-KE": true, "SN-KL": true, "SN-LG": true, "SN-MT": true, "SN-SE": true, - "SN-SL": true, "SN-TC": true, "SN-TH": true, "SN-ZG": true, "SO-AW": true, - "SO-BK": true, "SO-BN": true, "SO-BR": true, "SO-BY": true, "SO-GA": true, - "SO-GE": true, "SO-HI": true, "SO-JD": true, "SO-JH": true, "SO-MU": true, - "SO-NU": true, "SO-SA": true, "SO-SD": true, "SO-SH": true, "SO-SO": true, - "SO-TO": true, "SO-WO": true, "SR-BR": true, 
"SR-CM": true, "SR-CR": true, - "SR-MA": true, "SR-NI": true, "SR-PM": true, "SR-PR": true, "SR-SA": true, - "SR-SI": true, "SR-WA": true, "SS-BN": true, "SS-BW": true, "SS-EC": true, - "SS-EE8": true, "SS-EE": true, "SS-EW": true, "SS-JG": true, "SS-LK": true, "SS-NU": true, - "SS-UY": true, "SS-WR": true, "ST-01": true, "ST-P": true, "ST-S": true, "SV-AH": true, - "SV-CA": true, "SV-CH": true, "SV-CU": true, "SV-LI": true, "SV-MO": true, - "SV-PA": true, "SV-SA": true, "SV-SM": true, "SV-SO": true, "SV-SS": true, - "SV-SV": true, "SV-UN": true, "SV-US": true, "SY-DI": true, "SY-DR": true, - "SY-DY": true, "SY-HA": true, "SY-HI": true, "SY-HL": true, "SY-HM": true, - "SY-ID": true, "SY-LA": true, "SY-QU": true, "SY-RA": true, "SY-RD": true, - "SY-SU": true, "SY-TA": true, "SZ-HH": true, "SZ-LU": true, "SZ-MA": true, - "SZ-SH": true, "TD-BA": true, "TD-BG": true, "TD-BO": true, "TD-CB": true, - "TD-EN": true, "TD-GR": true, "TD-HL": true, "TD-KA": true, "TD-LC": true, - "TD-LO": true, "TD-LR": true, "TD-MA": true, "TD-MC": true, "TD-ME": true, - "TD-MO": true, "TD-ND": true, "TD-OD": true, "TD-SA": true, "TD-SI": true, - "TD-TA": true, "TD-TI": true, "TD-WF": true, "TG-C": true, "TG-K": true, - "TG-M": true, "TG-P": true, "TG-S": true, "TH-10": true, "TH-11": true, - "TH-12": true, "TH-13": true, "TH-14": true, "TH-15": true, "TH-16": true, - "TH-17": true, "TH-18": true, "TH-19": true, "TH-20": true, "TH-21": true, - "TH-22": true, "TH-23": true, "TH-24": true, "TH-25": true, "TH-26": true, - "TH-27": true, "TH-30": true, "TH-31": true, "TH-32": true, "TH-33": true, - "TH-34": true, "TH-35": true, "TH-36": true, "TH-37": true, "TH-38": true, "TH-39": true, - "TH-40": true, "TH-41": true, "TH-42": true, "TH-43": true, "TH-44": true, - "TH-45": true, "TH-46": true, "TH-47": true, "TH-48": true, "TH-49": true, - "TH-50": true, "TH-51": true, "TH-52": true, "TH-53": true, "TH-54": true, - "TH-55": true, "TH-56": true, "TH-57": true, "TH-58": true, "TH-60": true, - "TH-61": true, "TH-62": true, "TH-63": true, "TH-64": true, "TH-65": true, - "TH-66": true, "TH-67": true, "TH-70": true, "TH-71": true, "TH-72": true, - "TH-73": true, "TH-74": true, "TH-75": true, "TH-76": true, "TH-77": true, - "TH-80": true, "TH-81": true, "TH-82": true, "TH-83": true, "TH-84": true, - "TH-85": true, "TH-86": true, "TH-90": true, "TH-91": true, "TH-92": true, - "TH-93": true, "TH-94": true, "TH-95": true, "TH-96": true, "TH-S": true, - "TJ-GB": true, "TJ-KT": true, "TJ-SU": true, "TJ-DU": true, "TJ-RA": true, "TL-AL": true, "TL-AN": true, - "TL-BA": true, "TL-BO": true, "TL-CO": true, "TL-DI": true, "TL-ER": true, - "TL-LA": true, "TL-LI": true, "TL-MF": true, "TL-MT": true, "TL-OE": true, - "TL-VI": true, "TM-A": true, "TM-B": true, "TM-D": true, "TM-L": true, - "TM-M": true, "TM-S": true, "TN-11": true, "TN-12": true, "TN-13": true, - "TN-14": true, "TN-21": true, "TN-22": true, "TN-23": true, "TN-31": true, - "TN-32": true, "TN-33": true, "TN-34": true, "TN-41": true, "TN-42": true, - "TN-43": true, "TN-51": true, "TN-52": true, "TN-53": true, "TN-61": true, - "TN-71": true, "TN-72": true, "TN-73": true, "TN-81": true, "TN-82": true, - "TN-83": true, "TO-01": true, "TO-02": true, "TO-03": true, "TO-04": true, - "TO-05": true, "TR-01": true, "TR-02": true, "TR-03": true, "TR-04": true, - "TR-05": true, "TR-06": true, "TR-07": true, "TR-08": true, "TR-09": true, - "TR-10": true, "TR-11": true, "TR-12": true, "TR-13": true, "TR-14": true, - "TR-15": true, "TR-16": true, "TR-17": true, "TR-18": true, "TR-19": 
true, - "TR-20": true, "TR-21": true, "TR-22": true, "TR-23": true, "TR-24": true, - "TR-25": true, "TR-26": true, "TR-27": true, "TR-28": true, "TR-29": true, - "TR-30": true, "TR-31": true, "TR-32": true, "TR-33": true, "TR-34": true, - "TR-35": true, "TR-36": true, "TR-37": true, "TR-38": true, "TR-39": true, - "TR-40": true, "TR-41": true, "TR-42": true, "TR-43": true, "TR-44": true, - "TR-45": true, "TR-46": true, "TR-47": true, "TR-48": true, "TR-49": true, - "TR-50": true, "TR-51": true, "TR-52": true, "TR-53": true, "TR-54": true, - "TR-55": true, "TR-56": true, "TR-57": true, "TR-58": true, "TR-59": true, - "TR-60": true, "TR-61": true, "TR-62": true, "TR-63": true, "TR-64": true, - "TR-65": true, "TR-66": true, "TR-67": true, "TR-68": true, "TR-69": true, - "TR-70": true, "TR-71": true, "TR-72": true, "TR-73": true, "TR-74": true, - "TR-75": true, "TR-76": true, "TR-77": true, "TR-78": true, "TR-79": true, - "TR-80": true, "TR-81": true, "TT-ARI": true, "TT-CHA": true, "TT-CTT": true, - "TT-DMN": true, "TT-ETO": true, "TT-MRC": true, "TT-TOB": true, "TT-PED": true, "TT-POS": true, "TT-PRT": true, - "TT-PTF": true, "TT-RCM": true, "TT-SFO": true, "TT-SGE": true, "TT-SIP": true, - "TT-SJL": true, "TT-TUP": true, "TT-WTO": true, "TV-FUN": true, "TV-NIT": true, - "TV-NKF": true, "TV-NKL": true, "TV-NMA": true, "TV-NMG": true, "TV-NUI": true, - "TV-VAI": true, "TW-CHA": true, "TW-CYI": true, "TW-CYQ": true, "TW-KIN": true, "TW-HSQ": true, - "TW-HSZ": true, "TW-HUA": true, "TW-LIE": true, "TW-ILA": true, "TW-KEE": true, "TW-KHH": true, - "TW-KHQ": true, "TW-MIA": true, "TW-NAN": true, "TW-NWT": true, "TW-PEN": true, "TW-PIF": true, - "TW-TAO": true, "TW-TNN": true, "TW-TNQ": true, "TW-TPE": true, "TW-TPQ": true, - "TW-TTT": true, "TW-TXG": true, "TW-TXQ": true, "TW-YUN": true, "TZ-01": true, - "TZ-02": true, "TZ-03": true, "TZ-04": true, "TZ-05": true, "TZ-06": true, - "TZ-07": true, "TZ-08": true, "TZ-09": true, "TZ-10": true, "TZ-11": true, - "TZ-12": true, "TZ-13": true, "TZ-14": true, "TZ-15": true, "TZ-16": true, - "TZ-17": true, "TZ-18": true, "TZ-19": true, "TZ-20": true, "TZ-21": true, - "TZ-22": true, "TZ-23": true, "TZ-24": true, "TZ-25": true, "TZ-26": true, "TZ-27": true, "TZ-28": true, "TZ-29": true, "TZ-30": true, "TZ-31": true, - "UA-05": true, "UA-07": true, "UA-09": true, "UA-12": true, "UA-14": true, - "UA-18": true, "UA-21": true, "UA-23": true, "UA-26": true, "UA-30": true, - "UA-32": true, "UA-35": true, "UA-40": true, "UA-43": true, "UA-46": true, - "UA-48": true, "UA-51": true, "UA-53": true, "UA-56": true, "UA-59": true, - "UA-61": true, "UA-63": true, "UA-65": true, "UA-68": true, "UA-71": true, - "UA-74": true, "UA-77": true, "UG-101": true, "UG-102": true, "UG-103": true, - "UG-104": true, "UG-105": true, "UG-106": true, "UG-107": true, "UG-108": true, - "UG-109": true, "UG-110": true, "UG-111": true, "UG-112": true, "UG-113": true, - "UG-114": true, "UG-115": true, "UG-116": true, "UG-201": true, "UG-202": true, - "UG-203": true, "UG-204": true, "UG-205": true, "UG-206": true, "UG-207": true, - "UG-208": true, "UG-209": true, "UG-210": true, "UG-211": true, "UG-212": true, - "UG-213": true, "UG-214": true, "UG-215": true, "UG-216": true, "UG-217": true, - "UG-218": true, "UG-219": true, "UG-220": true, "UG-221": true, "UG-222": true, - "UG-223": true, "UG-224": true, "UG-301": true, "UG-302": true, "UG-303": true, - "UG-304": true, "UG-305": true, "UG-306": true, "UG-307": true, "UG-308": true, - "UG-309": true, "UG-310": true, "UG-311": true, "UG-312": 
true, "UG-313": true, - "UG-314": true, "UG-315": true, "UG-316": true, "UG-317": true, "UG-318": true, - "UG-319": true, "UG-320": true, "UG-321": true, "UG-401": true, "UG-402": true, - "UG-403": true, "UG-404": true, "UG-405": true, "UG-406": true, "UG-407": true, - "UG-408": true, "UG-409": true, "UG-410": true, "UG-411": true, "UG-412": true, - "UG-413": true, "UG-414": true, "UG-415": true, "UG-416": true, "UG-417": true, - "UG-418": true, "UG-419": true, "UG-C": true, "UG-E": true, "UG-N": true, - "UG-W": true, "UG-322": true, "UG-323": true, "UG-420": true, "UG-117": true, - "UG-118": true, "UG-225": true, "UG-120": true, "UG-226": true, - "UG-121": true, "UG-122": true, "UG-227": true, "UG-421": true, - "UG-325": true, "UG-228": true, "UG-123": true, "UG-422": true, - "UG-326": true, "UG-229": true, "UG-124": true, "UG-423": true, - "UG-230": true, "UG-327": true, "UG-424": true, "UG-328": true, - "UG-425": true, "UG-426": true, "UG-330": true, - "UM-67": true, "UM-71": true, "UM-76": true, "UM-79": true, - "UM-81": true, "UM-84": true, "UM-86": true, "UM-89": true, "UM-95": true, - "US-AK": true, "US-AL": true, "US-AR": true, "US-AS": true, "US-AZ": true, - "US-CA": true, "US-CO": true, "US-CT": true, "US-DC": true, "US-DE": true, - "US-FL": true, "US-GA": true, "US-GU": true, "US-HI": true, "US-IA": true, - "US-ID": true, "US-IL": true, "US-IN": true, "US-KS": true, "US-KY": true, - "US-LA": true, "US-MA": true, "US-MD": true, "US-ME": true, "US-MI": true, - "US-MN": true, "US-MO": true, "US-MP": true, "US-MS": true, "US-MT": true, - "US-NC": true, "US-ND": true, "US-NE": true, "US-NH": true, "US-NJ": true, - "US-NM": true, "US-NV": true, "US-NY": true, "US-OH": true, "US-OK": true, - "US-OR": true, "US-PA": true, "US-PR": true, "US-RI": true, "US-SC": true, - "US-SD": true, "US-TN": true, "US-TX": true, "US-UM": true, "US-UT": true, - "US-VA": true, "US-VI": true, "US-VT": true, "US-WA": true, "US-WI": true, - "US-WV": true, "US-WY": true, "UY-AR": true, "UY-CA": true, "UY-CL": true, - "UY-CO": true, "UY-DU": true, "UY-FD": true, "UY-FS": true, "UY-LA": true, - "UY-MA": true, "UY-MO": true, "UY-PA": true, "UY-RN": true, "UY-RO": true, - "UY-RV": true, "UY-SA": true, "UY-SJ": true, "UY-SO": true, "UY-TA": true, - "UY-TT": true, "UZ-AN": true, "UZ-BU": true, "UZ-FA": true, "UZ-JI": true, - "UZ-NG": true, "UZ-NW": true, "UZ-QA": true, "UZ-QR": true, "UZ-SA": true, - "UZ-SI": true, "UZ-SU": true, "UZ-TK": true, "UZ-TO": true, "UZ-XO": true, - "VC-01": true, "VC-02": true, "VC-03": true, "VC-04": true, "VC-05": true, - "VC-06": true, "VE-A": true, "VE-B": true, "VE-C": true, "VE-D": true, - "VE-E": true, "VE-F": true, "VE-G": true, "VE-H": true, "VE-I": true, - "VE-J": true, "VE-K": true, "VE-L": true, "VE-M": true, "VE-N": true, - "VE-O": true, "VE-P": true, "VE-R": true, "VE-S": true, "VE-T": true, - "VE-U": true, "VE-V": true, "VE-W": true, "VE-X": true, "VE-Y": true, - "VE-Z": true, "VN-01": true, "VN-02": true, "VN-03": true, "VN-04": true, - "VN-05": true, "VN-06": true, "VN-07": true, "VN-09": true, "VN-13": true, - "VN-14": true, "VN-15": true, "VN-18": true, "VN-20": true, "VN-21": true, - "VN-22": true, "VN-23": true, "VN-24": true, "VN-25": true, "VN-26": true, - "VN-27": true, "VN-28": true, "VN-29": true, "VN-30": true, "VN-31": true, - "VN-32": true, "VN-33": true, "VN-34": true, "VN-35": true, "VN-36": true, - "VN-37": true, "VN-39": true, "VN-40": true, "VN-41": true, "VN-43": true, - "VN-44": true, "VN-45": true, "VN-46": true, "VN-47": true, "VN-49": true, - 
"VN-50": true, "VN-51": true, "VN-52": true, "VN-53": true, "VN-54": true, - "VN-55": true, "VN-56": true, "VN-57": true, "VN-58": true, "VN-59": true, - "VN-61": true, "VN-63": true, "VN-66": true, "VN-67": true, "VN-68": true, - "VN-69": true, "VN-70": true, "VN-71": true, "VN-72": true, "VN-73": true, - "VN-CT": true, "VN-DN": true, "VN-HN": true, "VN-HP": true, "VN-SG": true, - "VU-MAP": true, "VU-PAM": true, "VU-SAM": true, "VU-SEE": true, "VU-TAE": true, - "VU-TOB": true, "WF-SG": true, "WF-UV": true, "WS-AA": true, "WS-AL": true, "WS-AT": true, "WS-FA": true, - "WS-GE": true, "WS-GI": true, "WS-PA": true, "WS-SA": true, "WS-TU": true, - "WS-VF": true, "WS-VS": true, "YE-AB": true, "YE-AD": true, "YE-AM": true, - "YE-BA": true, "YE-DA": true, "YE-DH": true, "YE-HD": true, "YE-HJ": true, "YE-HU": true, - "YE-IB": true, "YE-JA": true, "YE-LA": true, "YE-MA": true, "YE-MR": true, - "YE-MU": true, "YE-MW": true, "YE-RA": true, "YE-SA": true, "YE-SD": true, "YE-SH": true, - "YE-SN": true, "YE-TA": true, "ZA-EC": true, "ZA-FS": true, "ZA-GP": true, - "ZA-LP": true, "ZA-MP": true, "ZA-NC": true, "ZA-NW": true, "ZA-WC": true, - "ZA-ZN": true, "ZA-KZN": true, "ZM-01": true, "ZM-02": true, "ZM-03": true, "ZM-04": true, - "ZM-05": true, "ZM-06": true, "ZM-07": true, "ZM-08": true, "ZM-09": true, "ZM-10": true, - "ZW-BU": true, "ZW-HA": true, "ZW-MA": true, "ZW-MC": true, "ZW-ME": true, - "ZW-MI": true, "ZW-MN": true, "ZW-MS": true, "ZW-MV": true, "ZW-MW": true, +var iso3166_2 = map[string]struct{}{ + "AD-02": {}, "AD-03": {}, "AD-04": {}, "AD-05": {}, "AD-06": {}, + "AD-07": {}, "AD-08": {}, "AE-AJ": {}, "AE-AZ": {}, "AE-DU": {}, + "AE-FU": {}, "AE-RK": {}, "AE-SH": {}, "AE-UQ": {}, "AF-BAL": {}, + "AF-BAM": {}, "AF-BDG": {}, "AF-BDS": {}, "AF-BGL": {}, "AF-DAY": {}, + "AF-FRA": {}, "AF-FYB": {}, "AF-GHA": {}, "AF-GHO": {}, "AF-HEL": {}, + "AF-HER": {}, "AF-JOW": {}, "AF-KAB": {}, "AF-KAN": {}, "AF-KAP": {}, + "AF-KDZ": {}, "AF-KHO": {}, "AF-KNR": {}, "AF-LAG": {}, "AF-LOG": {}, + "AF-NAN": {}, "AF-NIM": {}, "AF-NUR": {}, "AF-PAN": {}, "AF-PAR": {}, + "AF-PIA": {}, "AF-PKA": {}, "AF-SAM": {}, "AF-SAR": {}, "AF-TAK": {}, + "AF-URU": {}, "AF-WAR": {}, "AF-ZAB": {}, "AG-03": {}, "AG-04": {}, + "AG-05": {}, "AG-06": {}, "AG-07": {}, "AG-08": {}, "AG-10": {}, + "AG-11": {}, "AL-01": {}, "AL-02": {}, "AL-03": {}, "AL-04": {}, + "AL-05": {}, "AL-06": {}, "AL-07": {}, "AL-08": {}, "AL-09": {}, + "AL-10": {}, "AL-11": {}, "AL-12": {}, "AL-BR": {}, "AL-BU": {}, + "AL-DI": {}, "AL-DL": {}, "AL-DR": {}, "AL-DV": {}, "AL-EL": {}, + "AL-ER": {}, "AL-FR": {}, "AL-GJ": {}, "AL-GR": {}, "AL-HA": {}, + "AL-KA": {}, "AL-KB": {}, "AL-KC": {}, "AL-KO": {}, "AL-KR": {}, + "AL-KU": {}, "AL-LB": {}, "AL-LE": {}, "AL-LU": {}, "AL-MK": {}, + "AL-MM": {}, "AL-MR": {}, "AL-MT": {}, "AL-PG": {}, "AL-PQ": {}, + "AL-PR": {}, "AL-PU": {}, "AL-SH": {}, "AL-SK": {}, "AL-SR": {}, + "AL-TE": {}, "AL-TP": {}, "AL-TR": {}, "AL-VL": {}, "AM-AG": {}, + "AM-AR": {}, "AM-AV": {}, "AM-ER": {}, "AM-GR": {}, "AM-KT": {}, + "AM-LO": {}, "AM-SH": {}, "AM-SU": {}, "AM-TV": {}, "AM-VD": {}, + "AO-BGO": {}, "AO-BGU": {}, "AO-BIE": {}, "AO-CAB": {}, "AO-CCU": {}, + "AO-CNN": {}, "AO-CNO": {}, "AO-CUS": {}, "AO-HUA": {}, "AO-HUI": {}, + "AO-LNO": {}, "AO-LSU": {}, "AO-LUA": {}, "AO-MAL": {}, "AO-MOX": {}, + "AO-NAM": {}, "AO-UIG": {}, "AO-ZAI": {}, "AR-A": {}, "AR-B": {}, + "AR-C": {}, "AR-D": {}, "AR-E": {}, "AR-F": {}, "AR-G": {}, "AR-H": {}, + "AR-J": {}, "AR-K": {}, "AR-L": {}, "AR-M": {}, "AR-N": {}, + "AR-P": {}, "AR-Q": {}, "AR-R": {}, 
"AR-S": {}, "AR-T": {}, + "AR-U": {}, "AR-V": {}, "AR-W": {}, "AR-X": {}, "AR-Y": {}, + "AR-Z": {}, "AT-1": {}, "AT-2": {}, "AT-3": {}, "AT-4": {}, + "AT-5": {}, "AT-6": {}, "AT-7": {}, "AT-8": {}, "AT-9": {}, + "AU-ACT": {}, "AU-NSW": {}, "AU-NT": {}, "AU-QLD": {}, "AU-SA": {}, + "AU-TAS": {}, "AU-VIC": {}, "AU-WA": {}, "AZ-ABS": {}, "AZ-AGA": {}, + "AZ-AGC": {}, "AZ-AGM": {}, "AZ-AGS": {}, "AZ-AGU": {}, "AZ-AST": {}, + "AZ-BA": {}, "AZ-BAB": {}, "AZ-BAL": {}, "AZ-BAR": {}, "AZ-BEY": {}, + "AZ-BIL": {}, "AZ-CAB": {}, "AZ-CAL": {}, "AZ-CUL": {}, "AZ-DAS": {}, + "AZ-FUZ": {}, "AZ-GA": {}, "AZ-GAD": {}, "AZ-GOR": {}, "AZ-GOY": {}, + "AZ-GYG": {}, "AZ-HAC": {}, "AZ-IMI": {}, "AZ-ISM": {}, "AZ-KAL": {}, + "AZ-KAN": {}, "AZ-KUR": {}, "AZ-LA": {}, "AZ-LAC": {}, "AZ-LAN": {}, + "AZ-LER": {}, "AZ-MAS": {}, "AZ-MI": {}, "AZ-NA": {}, "AZ-NEF": {}, + "AZ-NV": {}, "AZ-NX": {}, "AZ-OGU": {}, "AZ-ORD": {}, "AZ-QAB": {}, + "AZ-QAX": {}, "AZ-QAZ": {}, "AZ-QBA": {}, "AZ-QBI": {}, "AZ-QOB": {}, + "AZ-QUS": {}, "AZ-SA": {}, "AZ-SAB": {}, "AZ-SAD": {}, "AZ-SAH": {}, + "AZ-SAK": {}, "AZ-SAL": {}, "AZ-SAR": {}, "AZ-SAT": {}, "AZ-SBN": {}, + "AZ-SIY": {}, "AZ-SKR": {}, "AZ-SM": {}, "AZ-SMI": {}, "AZ-SMX": {}, + "AZ-SR": {}, "AZ-SUS": {}, "AZ-TAR": {}, "AZ-TOV": {}, "AZ-UCA": {}, + "AZ-XA": {}, "AZ-XAC": {}, "AZ-XCI": {}, "AZ-XIZ": {}, "AZ-XVD": {}, + "AZ-YAR": {}, "AZ-YE": {}, "AZ-YEV": {}, "AZ-ZAN": {}, "AZ-ZAQ": {}, + "AZ-ZAR": {}, "BA-01": {}, "BA-02": {}, "BA-03": {}, "BA-04": {}, + "BA-05": {}, "BA-06": {}, "BA-07": {}, "BA-08": {}, "BA-09": {}, + "BA-10": {}, "BA-BIH": {}, "BA-BRC": {}, "BA-SRP": {}, "BB-01": {}, + "BB-02": {}, "BB-03": {}, "BB-04": {}, "BB-05": {}, "BB-06": {}, + "BB-07": {}, "BB-08": {}, "BB-09": {}, "BB-10": {}, "BB-11": {}, + "BD-01": {}, "BD-02": {}, "BD-03": {}, "BD-04": {}, "BD-05": {}, + "BD-06": {}, "BD-07": {}, "BD-08": {}, "BD-09": {}, "BD-10": {}, + "BD-11": {}, "BD-12": {}, "BD-13": {}, "BD-14": {}, "BD-15": {}, + "BD-16": {}, "BD-17": {}, "BD-18": {}, "BD-19": {}, "BD-20": {}, + "BD-21": {}, "BD-22": {}, "BD-23": {}, "BD-24": {}, "BD-25": {}, + "BD-26": {}, "BD-27": {}, "BD-28": {}, "BD-29": {}, "BD-30": {}, + "BD-31": {}, "BD-32": {}, "BD-33": {}, "BD-34": {}, "BD-35": {}, + "BD-36": {}, "BD-37": {}, "BD-38": {}, "BD-39": {}, "BD-40": {}, + "BD-41": {}, "BD-42": {}, "BD-43": {}, "BD-44": {}, "BD-45": {}, + "BD-46": {}, "BD-47": {}, "BD-48": {}, "BD-49": {}, "BD-50": {}, + "BD-51": {}, "BD-52": {}, "BD-53": {}, "BD-54": {}, "BD-55": {}, + "BD-56": {}, "BD-57": {}, "BD-58": {}, "BD-59": {}, "BD-60": {}, + "BD-61": {}, "BD-62": {}, "BD-63": {}, "BD-64": {}, "BD-A": {}, + "BD-B": {}, "BD-C": {}, "BD-D": {}, "BD-E": {}, "BD-F": {}, + "BD-G": {}, "BE-BRU": {}, "BE-VAN": {}, "BE-VBR": {}, "BE-VLG": {}, + "BE-VLI": {}, "BE-VOV": {}, "BE-VWV": {}, "BE-WAL": {}, "BE-WBR": {}, + "BE-WHT": {}, "BE-WLG": {}, "BE-WLX": {}, "BE-WNA": {}, "BF-01": {}, + "BF-02": {}, "BF-03": {}, "BF-04": {}, "BF-05": {}, "BF-06": {}, + "BF-07": {}, "BF-08": {}, "BF-09": {}, "BF-10": {}, "BF-11": {}, + "BF-12": {}, "BF-13": {}, "BF-BAL": {}, "BF-BAM": {}, "BF-BAN": {}, + "BF-BAZ": {}, "BF-BGR": {}, "BF-BLG": {}, "BF-BLK": {}, "BF-COM": {}, + "BF-GAN": {}, "BF-GNA": {}, "BF-GOU": {}, "BF-HOU": {}, "BF-IOB": {}, + "BF-KAD": {}, "BF-KEN": {}, "BF-KMD": {}, "BF-KMP": {}, "BF-KOP": {}, + "BF-KOS": {}, "BF-KOT": {}, "BF-KOW": {}, "BF-LER": {}, "BF-LOR": {}, + "BF-MOU": {}, "BF-NAM": {}, "BF-NAO": {}, "BF-NAY": {}, "BF-NOU": {}, + "BF-OUB": {}, "BF-OUD": {}, "BF-PAS": {}, "BF-PON": {}, "BF-SEN": {}, + "BF-SIS": {}, 
"BF-SMT": {}, "BF-SNG": {}, "BF-SOM": {}, "BF-SOR": {}, + "BF-TAP": {}, "BF-TUI": {}, "BF-YAG": {}, "BF-YAT": {}, "BF-ZIR": {}, + "BF-ZON": {}, "BF-ZOU": {}, "BG-01": {}, "BG-02": {}, "BG-03": {}, + "BG-04": {}, "BG-05": {}, "BG-06": {}, "BG-07": {}, "BG-08": {}, + "BG-09": {}, "BG-10": {}, "BG-11": {}, "BG-12": {}, "BG-13": {}, + "BG-14": {}, "BG-15": {}, "BG-16": {}, "BG-17": {}, "BG-18": {}, + "BG-19": {}, "BG-20": {}, "BG-21": {}, "BG-22": {}, "BG-23": {}, + "BG-24": {}, "BG-25": {}, "BG-26": {}, "BG-27": {}, "BG-28": {}, + "BH-13": {}, "BH-14": {}, "BH-15": {}, "BH-16": {}, "BH-17": {}, + "BI-BB": {}, "BI-BL": {}, "BI-BM": {}, "BI-BR": {}, "BI-CA": {}, + "BI-CI": {}, "BI-GI": {}, "BI-KI": {}, "BI-KR": {}, "BI-KY": {}, + "BI-MA": {}, "BI-MU": {}, "BI-MW": {}, "BI-NG": {}, "BI-RM": {}, "BI-RT": {}, + "BI-RY": {}, "BJ-AK": {}, "BJ-AL": {}, "BJ-AQ": {}, "BJ-BO": {}, + "BJ-CO": {}, "BJ-DO": {}, "BJ-KO": {}, "BJ-LI": {}, "BJ-MO": {}, + "BJ-OU": {}, "BJ-PL": {}, "BJ-ZO": {}, "BN-BE": {}, "BN-BM": {}, + "BN-TE": {}, "BN-TU": {}, "BO-B": {}, "BO-C": {}, "BO-H": {}, + "BO-L": {}, "BO-N": {}, "BO-O": {}, "BO-P": {}, "BO-S": {}, + "BO-T": {}, "BQ-BO": {}, "BQ-SA": {}, "BQ-SE": {}, "BR-AC": {}, + "BR-AL": {}, "BR-AM": {}, "BR-AP": {}, "BR-BA": {}, "BR-CE": {}, + "BR-DF": {}, "BR-ES": {}, "BR-FN": {}, "BR-GO": {}, "BR-MA": {}, + "BR-MG": {}, "BR-MS": {}, "BR-MT": {}, "BR-PA": {}, "BR-PB": {}, + "BR-PE": {}, "BR-PI": {}, "BR-PR": {}, "BR-RJ": {}, "BR-RN": {}, + "BR-RO": {}, "BR-RR": {}, "BR-RS": {}, "BR-SC": {}, "BR-SE": {}, + "BR-SP": {}, "BR-TO": {}, "BS-AK": {}, "BS-BI": {}, "BS-BP": {}, + "BS-BY": {}, "BS-CE": {}, "BS-CI": {}, "BS-CK": {}, "BS-CO": {}, + "BS-CS": {}, "BS-EG": {}, "BS-EX": {}, "BS-FP": {}, "BS-GC": {}, + "BS-HI": {}, "BS-HT": {}, "BS-IN": {}, "BS-LI": {}, "BS-MC": {}, + "BS-MG": {}, "BS-MI": {}, "BS-NE": {}, "BS-NO": {}, "BS-NP": {}, "BS-NS": {}, + "BS-RC": {}, "BS-RI": {}, "BS-SA": {}, "BS-SE": {}, "BS-SO": {}, + "BS-SS": {}, "BS-SW": {}, "BS-WG": {}, "BT-11": {}, "BT-12": {}, + "BT-13": {}, "BT-14": {}, "BT-15": {}, "BT-21": {}, "BT-22": {}, + "BT-23": {}, "BT-24": {}, "BT-31": {}, "BT-32": {}, "BT-33": {}, + "BT-34": {}, "BT-41": {}, "BT-42": {}, "BT-43": {}, "BT-44": {}, + "BT-45": {}, "BT-GA": {}, "BT-TY": {}, "BW-CE": {}, "BW-CH": {}, "BW-GH": {}, + "BW-KG": {}, "BW-KL": {}, "BW-KW": {}, "BW-NE": {}, "BW-NW": {}, + "BW-SE": {}, "BW-SO": {}, "BY-BR": {}, "BY-HM": {}, "BY-HO": {}, + "BY-HR": {}, "BY-MA": {}, "BY-MI": {}, "BY-VI": {}, "BZ-BZ": {}, + "BZ-CY": {}, "BZ-CZL": {}, "BZ-OW": {}, "BZ-SC": {}, "BZ-TOL": {}, + "CA-AB": {}, "CA-BC": {}, "CA-MB": {}, "CA-NB": {}, "CA-NL": {}, + "CA-NS": {}, "CA-NT": {}, "CA-NU": {}, "CA-ON": {}, "CA-PE": {}, + "CA-QC": {}, "CA-SK": {}, "CA-YT": {}, "CD-BC": {}, "CD-BN": {}, + "CD-EQ": {}, "CD-HK": {}, "CD-IT": {}, "CD-KA": {}, "CD-KC": {}, "CD-KE": {}, "CD-KG": {}, "CD-KN": {}, + "CD-KW": {}, "CD-KS": {}, "CD-LU": {}, "CD-MA": {}, "CD-NK": {}, "CD-OR": {}, "CD-SA": {}, "CD-SK": {}, + "CD-TA": {}, "CD-TO": {}, "CF-AC": {}, "CF-BB": {}, "CF-BGF": {}, "CF-BK": {}, "CF-HK": {}, "CF-HM": {}, + "CF-HS": {}, "CF-KB": {}, "CF-KG": {}, "CF-LB": {}, "CF-MB": {}, + "CF-MP": {}, "CF-NM": {}, "CF-OP": {}, "CF-SE": {}, "CF-UK": {}, + "CF-VK": {}, "CG-11": {}, "CG-12": {}, "CG-13": {}, "CG-14": {}, + "CG-15": {}, "CG-16": {}, "CG-2": {}, "CG-5": {}, "CG-7": {}, "CG-8": {}, + "CG-9": {}, "CG-BZV": {}, "CH-AG": {}, "CH-AI": {}, "CH-AR": {}, + "CH-BE": {}, "CH-BL": {}, "CH-BS": {}, "CH-FR": {}, "CH-GE": {}, + "CH-GL": {}, "CH-GR": {}, "CH-JU": {}, "CH-LU": 
{}, "CH-NE": {}, + "CH-NW": {}, "CH-OW": {}, "CH-SG": {}, "CH-SH": {}, "CH-SO": {}, + "CH-SZ": {}, "CH-TG": {}, "CH-TI": {}, "CH-UR": {}, "CH-VD": {}, + "CH-VS": {}, "CH-ZG": {}, "CH-ZH": {}, "CI-AB": {}, "CI-BS": {}, + "CI-CM": {}, "CI-DN": {}, "CI-GD": {}, "CI-LC": {}, "CI-LG": {}, + "CI-MG": {}, "CI-SM": {}, "CI-SV": {}, "CI-VB": {}, "CI-WR": {}, + "CI-YM": {}, "CI-ZZ": {}, "CL-AI": {}, "CL-AN": {}, "CL-AP": {}, + "CL-AR": {}, "CL-AT": {}, "CL-BI": {}, "CL-CO": {}, "CL-LI": {}, + "CL-LL": {}, "CL-LR": {}, "CL-MA": {}, "CL-ML": {}, "CL-NB": {}, "CL-RM": {}, + "CL-TA": {}, "CL-VS": {}, "CM-AD": {}, "CM-CE": {}, "CM-EN": {}, + "CM-ES": {}, "CM-LT": {}, "CM-NO": {}, "CM-NW": {}, "CM-OU": {}, + "CM-SU": {}, "CM-SW": {}, "CN-AH": {}, "CN-BJ": {}, "CN-CQ": {}, + "CN-FJ": {}, "CN-GS": {}, "CN-GD": {}, "CN-GX": {}, "CN-GZ": {}, + "CN-HI": {}, "CN-HE": {}, "CN-HL": {}, "CN-HA": {}, "CN-HB": {}, + "CN-HN": {}, "CN-JS": {}, "CN-JX": {}, "CN-JL": {}, "CN-LN": {}, + "CN-NM": {}, "CN-NX": {}, "CN-QH": {}, "CN-SN": {}, "CN-SD": {}, "CN-SH": {}, + "CN-SX": {}, "CN-SC": {}, "CN-TJ": {}, "CN-XJ": {}, "CN-XZ": {}, "CN-YN": {}, + "CN-ZJ": {}, "CO-AMA": {}, "CO-ANT": {}, "CO-ARA": {}, "CO-ATL": {}, + "CO-BOL": {}, "CO-BOY": {}, "CO-CAL": {}, "CO-CAQ": {}, "CO-CAS": {}, + "CO-CAU": {}, "CO-CES": {}, "CO-CHO": {}, "CO-COR": {}, "CO-CUN": {}, + "CO-DC": {}, "CO-GUA": {}, "CO-GUV": {}, "CO-HUI": {}, "CO-LAG": {}, + "CO-MAG": {}, "CO-MET": {}, "CO-NAR": {}, "CO-NSA": {}, "CO-PUT": {}, + "CO-QUI": {}, "CO-RIS": {}, "CO-SAN": {}, "CO-SAP": {}, "CO-SUC": {}, + "CO-TOL": {}, "CO-VAC": {}, "CO-VAU": {}, "CO-VID": {}, "CR-A": {}, + "CR-C": {}, "CR-G": {}, "CR-H": {}, "CR-L": {}, "CR-P": {}, + "CR-SJ": {}, "CU-01": {}, "CU-02": {}, "CU-03": {}, "CU-04": {}, + "CU-05": {}, "CU-06": {}, "CU-07": {}, "CU-08": {}, "CU-09": {}, + "CU-10": {}, "CU-11": {}, "CU-12": {}, "CU-13": {}, "CU-14": {}, "CU-15": {}, + "CU-16": {}, "CU-99": {}, "CV-B": {}, "CV-BR": {}, "CV-BV": {}, "CV-CA": {}, + "CV-CF": {}, "CV-CR": {}, "CV-MA": {}, "CV-MO": {}, "CV-PA": {}, + "CV-PN": {}, "CV-PR": {}, "CV-RB": {}, "CV-RG": {}, "CV-RS": {}, + "CV-S": {}, "CV-SD": {}, "CV-SF": {}, "CV-SL": {}, "CV-SM": {}, + "CV-SO": {}, "CV-SS": {}, "CV-SV": {}, "CV-TA": {}, "CV-TS": {}, + "CY-01": {}, "CY-02": {}, "CY-03": {}, "CY-04": {}, "CY-05": {}, + "CY-06": {}, "CZ-10": {}, "CZ-101": {}, "CZ-102": {}, "CZ-103": {}, + "CZ-104": {}, "CZ-105": {}, "CZ-106": {}, "CZ-107": {}, "CZ-108": {}, + "CZ-109": {}, "CZ-110": {}, "CZ-111": {}, "CZ-112": {}, "CZ-113": {}, + "CZ-114": {}, "CZ-115": {}, "CZ-116": {}, "CZ-117": {}, "CZ-118": {}, + "CZ-119": {}, "CZ-120": {}, "CZ-121": {}, "CZ-122": {}, "CZ-20": {}, + "CZ-201": {}, "CZ-202": {}, "CZ-203": {}, "CZ-204": {}, "CZ-205": {}, + "CZ-206": {}, "CZ-207": {}, "CZ-208": {}, "CZ-209": {}, "CZ-20A": {}, + "CZ-20B": {}, "CZ-20C": {}, "CZ-31": {}, "CZ-311": {}, "CZ-312": {}, + "CZ-313": {}, "CZ-314": {}, "CZ-315": {}, "CZ-316": {}, "CZ-317": {}, + "CZ-32": {}, "CZ-321": {}, "CZ-322": {}, "CZ-323": {}, "CZ-324": {}, + "CZ-325": {}, "CZ-326": {}, "CZ-327": {}, "CZ-41": {}, "CZ-411": {}, + "CZ-412": {}, "CZ-413": {}, "CZ-42": {}, "CZ-421": {}, "CZ-422": {}, + "CZ-423": {}, "CZ-424": {}, "CZ-425": {}, "CZ-426": {}, "CZ-427": {}, + "CZ-51": {}, "CZ-511": {}, "CZ-512": {}, "CZ-513": {}, "CZ-514": {}, + "CZ-52": {}, "CZ-521": {}, "CZ-522": {}, "CZ-523": {}, "CZ-524": {}, + "CZ-525": {}, "CZ-53": {}, "CZ-531": {}, "CZ-532": {}, "CZ-533": {}, + "CZ-534": {}, "CZ-63": {}, "CZ-631": {}, "CZ-632": {}, "CZ-633": {}, + "CZ-634": {}, 
"CZ-635": {}, "CZ-64": {}, "CZ-641": {}, "CZ-642": {}, + "CZ-643": {}, "CZ-644": {}, "CZ-645": {}, "CZ-646": {}, "CZ-647": {}, + "CZ-71": {}, "CZ-711": {}, "CZ-712": {}, "CZ-713": {}, "CZ-714": {}, + "CZ-715": {}, "CZ-72": {}, "CZ-721": {}, "CZ-722": {}, "CZ-723": {}, + "CZ-724": {}, "CZ-80": {}, "CZ-801": {}, "CZ-802": {}, "CZ-803": {}, + "CZ-804": {}, "CZ-805": {}, "CZ-806": {}, "DE-BB": {}, "DE-BE": {}, + "DE-BW": {}, "DE-BY": {}, "DE-HB": {}, "DE-HE": {}, "DE-HH": {}, + "DE-MV": {}, "DE-NI": {}, "DE-NW": {}, "DE-RP": {}, "DE-SH": {}, + "DE-SL": {}, "DE-SN": {}, "DE-ST": {}, "DE-TH": {}, "DJ-AR": {}, + "DJ-AS": {}, "DJ-DI": {}, "DJ-DJ": {}, "DJ-OB": {}, "DJ-TA": {}, + "DK-81": {}, "DK-82": {}, "DK-83": {}, "DK-84": {}, "DK-85": {}, + "DM-01": {}, "DM-02": {}, "DM-03": {}, "DM-04": {}, "DM-05": {}, + "DM-06": {}, "DM-07": {}, "DM-08": {}, "DM-09": {}, "DM-10": {}, + "DO-01": {}, "DO-02": {}, "DO-03": {}, "DO-04": {}, "DO-05": {}, + "DO-06": {}, "DO-07": {}, "DO-08": {}, "DO-09": {}, "DO-10": {}, + "DO-11": {}, "DO-12": {}, "DO-13": {}, "DO-14": {}, "DO-15": {}, + "DO-16": {}, "DO-17": {}, "DO-18": {}, "DO-19": {}, "DO-20": {}, + "DO-21": {}, "DO-22": {}, "DO-23": {}, "DO-24": {}, "DO-25": {}, + "DO-26": {}, "DO-27": {}, "DO-28": {}, "DO-29": {}, "DO-30": {}, "DO-31": {}, + "DZ-01": {}, "DZ-02": {}, "DZ-03": {}, "DZ-04": {}, "DZ-05": {}, + "DZ-06": {}, "DZ-07": {}, "DZ-08": {}, "DZ-09": {}, "DZ-10": {}, + "DZ-11": {}, "DZ-12": {}, "DZ-13": {}, "DZ-14": {}, "DZ-15": {}, + "DZ-16": {}, "DZ-17": {}, "DZ-18": {}, "DZ-19": {}, "DZ-20": {}, + "DZ-21": {}, "DZ-22": {}, "DZ-23": {}, "DZ-24": {}, "DZ-25": {}, + "DZ-26": {}, "DZ-27": {}, "DZ-28": {}, "DZ-29": {}, "DZ-30": {}, + "DZ-31": {}, "DZ-32": {}, "DZ-33": {}, "DZ-34": {}, "DZ-35": {}, + "DZ-36": {}, "DZ-37": {}, "DZ-38": {}, "DZ-39": {}, "DZ-40": {}, + "DZ-41": {}, "DZ-42": {}, "DZ-43": {}, "DZ-44": {}, "DZ-45": {}, + "DZ-46": {}, "DZ-47": {}, "DZ-48": {}, "DZ-49": {}, "DZ-51": {}, + "DZ-53": {}, "DZ-55": {}, "DZ-56": {}, "DZ-57": {}, "EC-A": {}, "EC-B": {}, + "EC-C": {}, "EC-D": {}, "EC-E": {}, "EC-F": {}, "EC-G": {}, + "EC-H": {}, "EC-I": {}, "EC-L": {}, "EC-M": {}, "EC-N": {}, + "EC-O": {}, "EC-P": {}, "EC-R": {}, "EC-S": {}, "EC-SD": {}, + "EC-SE": {}, "EC-T": {}, "EC-U": {}, "EC-W": {}, "EC-X": {}, + "EC-Y": {}, "EC-Z": {}, "EE-37": {}, "EE-39": {}, "EE-44": {}, "EE-45": {}, + "EE-49": {}, "EE-50": {}, "EE-51": {}, "EE-52": {}, "EE-56": {}, "EE-57": {}, + "EE-59": {}, "EE-60": {}, "EE-64": {}, "EE-65": {}, "EE-67": {}, "EE-68": {}, + "EE-70": {}, "EE-71": {}, "EE-74": {}, "EE-78": {}, "EE-79": {}, "EE-81": {}, "EE-82": {}, + "EE-84": {}, "EE-86": {}, "EE-87": {}, "EG-ALX": {}, "EG-ASN": {}, "EG-AST": {}, + "EG-BA": {}, "EG-BH": {}, "EG-BNS": {}, "EG-C": {}, "EG-DK": {}, + "EG-DT": {}, "EG-FYM": {}, "EG-GH": {}, "EG-GZ": {}, "EG-HU": {}, + "EG-IS": {}, "EG-JS": {}, "EG-KB": {}, "EG-KFS": {}, "EG-KN": {}, + "EG-LX": {}, "EG-MN": {}, "EG-MNF": {}, "EG-MT": {}, "EG-PTS": {}, "EG-SHG": {}, + "EG-SHR": {}, "EG-SIN": {}, "EG-SU": {}, "EG-SUZ": {}, "EG-WAD": {}, + "ER-AN": {}, "ER-DK": {}, "ER-DU": {}, "ER-GB": {}, "ER-MA": {}, + "ER-SK": {}, "ES-A": {}, "ES-AB": {}, "ES-AL": {}, "ES-AN": {}, + "ES-AR": {}, "ES-AS": {}, "ES-AV": {}, "ES-B": {}, "ES-BA": {}, + "ES-BI": {}, "ES-BU": {}, "ES-C": {}, "ES-CA": {}, "ES-CB": {}, + "ES-CC": {}, "ES-CE": {}, "ES-CL": {}, "ES-CM": {}, "ES-CN": {}, + "ES-CO": {}, "ES-CR": {}, "ES-CS": {}, "ES-CT": {}, "ES-CU": {}, + "ES-EX": {}, "ES-GA": {}, "ES-GC": {}, "ES-GI": {}, "ES-GR": {}, + "ES-GU": {}, "ES-H": 
{}, "ES-HU": {}, "ES-IB": {}, "ES-J": {}, + "ES-L": {}, "ES-LE": {}, "ES-LO": {}, "ES-LU": {}, "ES-M": {}, + "ES-MA": {}, "ES-MC": {}, "ES-MD": {}, "ES-ML": {}, "ES-MU": {}, + "ES-NA": {}, "ES-NC": {}, "ES-O": {}, "ES-OR": {}, "ES-P": {}, + "ES-PM": {}, "ES-PO": {}, "ES-PV": {}, "ES-RI": {}, "ES-S": {}, + "ES-SA": {}, "ES-SE": {}, "ES-SG": {}, "ES-SO": {}, "ES-SS": {}, + "ES-T": {}, "ES-TE": {}, "ES-TF": {}, "ES-TO": {}, "ES-V": {}, + "ES-VA": {}, "ES-VC": {}, "ES-VI": {}, "ES-Z": {}, "ES-ZA": {}, + "ET-AA": {}, "ET-AF": {}, "ET-AM": {}, "ET-BE": {}, "ET-DD": {}, + "ET-GA": {}, "ET-HA": {}, "ET-OR": {}, "ET-SN": {}, "ET-SO": {}, + "ET-TI": {}, "FI-01": {}, "FI-02": {}, "FI-03": {}, "FI-04": {}, + "FI-05": {}, "FI-06": {}, "FI-07": {}, "FI-08": {}, "FI-09": {}, + "FI-10": {}, "FI-11": {}, "FI-12": {}, "FI-13": {}, "FI-14": {}, + "FI-15": {}, "FI-16": {}, "FI-17": {}, "FI-18": {}, "FI-19": {}, + "FJ-C": {}, "FJ-E": {}, "FJ-N": {}, "FJ-R": {}, "FJ-W": {}, + "FM-KSA": {}, "FM-PNI": {}, "FM-TRK": {}, "FM-YAP": {}, "FR-01": {}, + "FR-02": {}, "FR-03": {}, "FR-04": {}, "FR-05": {}, "FR-06": {}, + "FR-07": {}, "FR-08": {}, "FR-09": {}, "FR-10": {}, "FR-11": {}, + "FR-12": {}, "FR-13": {}, "FR-14": {}, "FR-15": {}, "FR-16": {}, + "FR-17": {}, "FR-18": {}, "FR-19": {}, "FR-20R": {}, "FR-21": {}, "FR-22": {}, + "FR-23": {}, "FR-24": {}, "FR-25": {}, "FR-26": {}, "FR-27": {}, + "FR-28": {}, "FR-29": {}, "FR-2A": {}, "FR-2B": {}, "FR-30": {}, + "FR-31": {}, "FR-32": {}, "FR-33": {}, "FR-34": {}, "FR-35": {}, + "FR-36": {}, "FR-37": {}, "FR-38": {}, "FR-39": {}, "FR-40": {}, + "FR-41": {}, "FR-42": {}, "FR-43": {}, "FR-44": {}, "FR-45": {}, + "FR-46": {}, "FR-47": {}, "FR-48": {}, "FR-49": {}, "FR-50": {}, + "FR-51": {}, "FR-52": {}, "FR-53": {}, "FR-54": {}, "FR-55": {}, + "FR-56": {}, "FR-57": {}, "FR-58": {}, "FR-59": {}, "FR-60": {}, + "FR-61": {}, "FR-62": {}, "FR-63": {}, "FR-64": {}, "FR-65": {}, + "FR-66": {}, "FR-67": {}, "FR-68": {}, "FR-69": {}, "FR-70": {}, + "FR-71": {}, "FR-72": {}, "FR-73": {}, "FR-74": {}, "FR-75": {}, + "FR-76": {}, "FR-77": {}, "FR-78": {}, "FR-79": {}, "FR-80": {}, + "FR-81": {}, "FR-82": {}, "FR-83": {}, "FR-84": {}, "FR-85": {}, + "FR-86": {}, "FR-87": {}, "FR-88": {}, "FR-89": {}, "FR-90": {}, + "FR-91": {}, "FR-92": {}, "FR-93": {}, "FR-94": {}, "FR-95": {}, + "FR-ARA": {}, "FR-BFC": {}, "FR-BL": {}, "FR-BRE": {}, "FR-COR": {}, + "FR-CP": {}, "FR-CVL": {}, "FR-GES": {}, "FR-GF": {}, "FR-GP": {}, + "FR-GUA": {}, "FR-HDF": {}, "FR-IDF": {}, "FR-LRE": {}, "FR-MAY": {}, + "FR-MF": {}, "FR-MQ": {}, "FR-NAQ": {}, "FR-NC": {}, "FR-NOR": {}, + "FR-OCC": {}, "FR-PAC": {}, "FR-PDL": {}, "FR-PF": {}, "FR-PM": {}, + "FR-RE": {}, "FR-TF": {}, "FR-WF": {}, "FR-YT": {}, "GA-1": {}, + "GA-2": {}, "GA-3": {}, "GA-4": {}, "GA-5": {}, "GA-6": {}, + "GA-7": {}, "GA-8": {}, "GA-9": {}, "GB-ABC": {}, "GB-ABD": {}, + "GB-ABE": {}, "GB-AGB": {}, "GB-AGY": {}, "GB-AND": {}, "GB-ANN": {}, + "GB-ANS": {}, "GB-BAS": {}, "GB-BBD": {}, "GB-BDF": {}, "GB-BDG": {}, + "GB-BEN": {}, "GB-BEX": {}, "GB-BFS": {}, "GB-BGE": {}, "GB-BGW": {}, + "GB-BIR": {}, "GB-BKM": {}, "GB-BMH": {}, "GB-BNE": {}, "GB-BNH": {}, + "GB-BNS": {}, "GB-BOL": {}, "GB-BPL": {}, "GB-BRC": {}, "GB-BRD": {}, + "GB-BRY": {}, "GB-BST": {}, "GB-BUR": {}, "GB-CAM": {}, "GB-CAY": {}, + "GB-CBF": {}, "GB-CCG": {}, "GB-CGN": {}, "GB-CHE": {}, "GB-CHW": {}, + "GB-CLD": {}, "GB-CLK": {}, "GB-CMA": {}, "GB-CMD": {}, "GB-CMN": {}, + "GB-CON": {}, "GB-COV": {}, "GB-CRF": {}, "GB-CRY": {}, "GB-CWY": {}, + "GB-DAL": {}, "GB-DBY": {}, 
"GB-DEN": {}, "GB-DER": {}, "GB-DEV": {}, + "GB-DGY": {}, "GB-DNC": {}, "GB-DND": {}, "GB-DOR": {}, "GB-DRS": {}, + "GB-DUD": {}, "GB-DUR": {}, "GB-EAL": {}, "GB-EAW": {}, "GB-EAY": {}, + "GB-EDH": {}, "GB-EDU": {}, "GB-ELN": {}, "GB-ELS": {}, "GB-ENF": {}, + "GB-ENG": {}, "GB-ERW": {}, "GB-ERY": {}, "GB-ESS": {}, "GB-ESX": {}, + "GB-FAL": {}, "GB-FIF": {}, "GB-FLN": {}, "GB-FMO": {}, "GB-GAT": {}, + "GB-GBN": {}, "GB-GLG": {}, "GB-GLS": {}, "GB-GRE": {}, "GB-GWN": {}, + "GB-HAL": {}, "GB-HAM": {}, "GB-HAV": {}, "GB-HCK": {}, "GB-HEF": {}, + "GB-HIL": {}, "GB-HLD": {}, "GB-HMF": {}, "GB-HNS": {}, "GB-HPL": {}, + "GB-HRT": {}, "GB-HRW": {}, "GB-HRY": {}, "GB-IOS": {}, "GB-IOW": {}, + "GB-ISL": {}, "GB-IVC": {}, "GB-KEC": {}, "GB-KEN": {}, "GB-KHL": {}, + "GB-KIR": {}, "GB-KTT": {}, "GB-KWL": {}, "GB-LAN": {}, "GB-LBC": {}, + "GB-LBH": {}, "GB-LCE": {}, "GB-LDS": {}, "GB-LEC": {}, "GB-LEW": {}, + "GB-LIN": {}, "GB-LIV": {}, "GB-LND": {}, "GB-LUT": {}, "GB-MAN": {}, + "GB-MDB": {}, "GB-MDW": {}, "GB-MEA": {}, "GB-MIK": {}, "GD-01": {}, + "GB-MLN": {}, "GB-MON": {}, "GB-MRT": {}, "GB-MRY": {}, "GB-MTY": {}, + "GB-MUL": {}, "GB-NAY": {}, "GB-NBL": {}, "GB-NEL": {}, "GB-NET": {}, + "GB-NFK": {}, "GB-NGM": {}, "GB-NIR": {}, "GB-NLK": {}, "GB-NLN": {}, + "GB-NMD": {}, "GB-NSM": {}, "GB-NTH": {}, "GB-NTL": {}, "GB-NTT": {}, + "GB-NTY": {}, "GB-NWM": {}, "GB-NWP": {}, "GB-NYK": {}, "GB-OLD": {}, + "GB-ORK": {}, "GB-OXF": {}, "GB-PEM": {}, "GB-PKN": {}, "GB-PLY": {}, + "GB-POL": {}, "GB-POR": {}, "GB-POW": {}, "GB-PTE": {}, "GB-RCC": {}, + "GB-RCH": {}, "GB-RCT": {}, "GB-RDB": {}, "GB-RDG": {}, "GB-RFW": {}, + "GB-RIC": {}, "GB-ROT": {}, "GB-RUT": {}, "GB-SAW": {}, "GB-SAY": {}, + "GB-SCB": {}, "GB-SCT": {}, "GB-SFK": {}, "GB-SFT": {}, "GB-SGC": {}, + "GB-SHF": {}, "GB-SHN": {}, "GB-SHR": {}, "GB-SKP": {}, "GB-SLF": {}, + "GB-SLG": {}, "GB-SLK": {}, "GB-SND": {}, "GB-SOL": {}, "GB-SOM": {}, + "GB-SOS": {}, "GB-SRY": {}, "GB-STE": {}, "GB-STG": {}, "GB-STH": {}, + "GB-STN": {}, "GB-STS": {}, "GB-STT": {}, "GB-STY": {}, "GB-SWA": {}, + "GB-SWD": {}, "GB-SWK": {}, "GB-TAM": {}, "GB-TFW": {}, "GB-THR": {}, + "GB-TOB": {}, "GB-TOF": {}, "GB-TRF": {}, "GB-TWH": {}, "GB-UKM": {}, + "GB-VGL": {}, "GB-WAR": {}, "GB-WBK": {}, "GB-WDU": {}, "GB-WFT": {}, + "GB-WGN": {}, "GB-WIL": {}, "GB-WKF": {}, "GB-WLL": {}, "GB-WLN": {}, + "GB-WLS": {}, "GB-WLV": {}, "GB-WND": {}, "GB-WNM": {}, "GB-WOK": {}, + "GB-WOR": {}, "GB-WRL": {}, "GB-WRT": {}, "GB-WRX": {}, "GB-WSM": {}, + "GB-WSX": {}, "GB-YOR": {}, "GB-ZET": {}, "GD-02": {}, "GD-03": {}, + "GD-04": {}, "GD-05": {}, "GD-06": {}, "GD-10": {}, "GE-AB": {}, + "GE-AJ": {}, "GE-GU": {}, "GE-IM": {}, "GE-KA": {}, "GE-KK": {}, + "GE-MM": {}, "GE-RL": {}, "GE-SJ": {}, "GE-SK": {}, "GE-SZ": {}, + "GE-TB": {}, "GH-AA": {}, "GH-AH": {}, "GH-AF": {}, "GH-BA": {}, "GH-BO": {}, "GH-BE": {}, "GH-CP": {}, + "GH-EP": {}, "GH-NP": {}, "GH-TV": {}, "GH-UE": {}, "GH-UW": {}, + "GH-WP": {}, "GL-AV": {}, "GL-KU": {}, "GL-QA": {}, "GL-QT": {}, "GL-QE": {}, "GL-SM": {}, + "GM-B": {}, "GM-L": {}, "GM-M": {}, "GM-N": {}, "GM-U": {}, + "GM-W": {}, "GN-B": {}, "GN-BE": {}, "GN-BF": {}, "GN-BK": {}, + "GN-C": {}, "GN-CO": {}, "GN-D": {}, "GN-DB": {}, "GN-DI": {}, + "GN-DL": {}, "GN-DU": {}, "GN-F": {}, "GN-FA": {}, "GN-FO": {}, + "GN-FR": {}, "GN-GA": {}, "GN-GU": {}, "GN-K": {}, "GN-KA": {}, + "GN-KB": {}, "GN-KD": {}, "GN-KE": {}, "GN-KN": {}, "GN-KO": {}, + "GN-KS": {}, "GN-L": {}, "GN-LA": {}, "GN-LE": {}, "GN-LO": {}, + "GN-M": {}, "GN-MC": {}, "GN-MD": {}, "GN-ML": {}, "GN-MM": {}, + 
"GN-N": {}, "GN-NZ": {}, "GN-PI": {}, "GN-SI": {}, "GN-TE": {}, + "GN-TO": {}, "GN-YO": {}, "GQ-AN": {}, "GQ-BN": {}, "GQ-BS": {}, + "GQ-C": {}, "GQ-CS": {}, "GQ-I": {}, "GQ-KN": {}, "GQ-LI": {}, + "GQ-WN": {}, "GR-01": {}, "GR-03": {}, "GR-04": {}, "GR-05": {}, + "GR-06": {}, "GR-07": {}, "GR-11": {}, "GR-12": {}, "GR-13": {}, + "GR-14": {}, "GR-15": {}, "GR-16": {}, "GR-17": {}, "GR-21": {}, + "GR-22": {}, "GR-23": {}, "GR-24": {}, "GR-31": {}, "GR-32": {}, + "GR-33": {}, "GR-34": {}, "GR-41": {}, "GR-42": {}, "GR-43": {}, + "GR-44": {}, "GR-51": {}, "GR-52": {}, "GR-53": {}, "GR-54": {}, + "GR-55": {}, "GR-56": {}, "GR-57": {}, "GR-58": {}, "GR-59": {}, + "GR-61": {}, "GR-62": {}, "GR-63": {}, "GR-64": {}, "GR-69": {}, + "GR-71": {}, "GR-72": {}, "GR-73": {}, "GR-81": {}, "GR-82": {}, + "GR-83": {}, "GR-84": {}, "GR-85": {}, "GR-91": {}, "GR-92": {}, + "GR-93": {}, "GR-94": {}, "GR-A": {}, "GR-A1": {}, "GR-B": {}, + "GR-C": {}, "GR-D": {}, "GR-E": {}, "GR-F": {}, "GR-G": {}, + "GR-H": {}, "GR-I": {}, "GR-J": {}, "GR-K": {}, "GR-L": {}, + "GR-M": {}, "GT-01": {}, "GT-02": {}, "GT-03": {}, "GT-04": {}, + "GT-05": {}, "GT-06": {}, "GT-07": {}, "GT-08": {}, "GT-09": {}, + "GT-10": {}, "GT-11": {}, "GT-12": {}, "GT-13": {}, "GT-14": {}, + "GT-15": {}, "GT-16": {}, "GT-17": {}, "GT-18": {}, "GT-19": {}, + "GT-20": {}, "GT-21": {}, "GT-22": {}, "GW-BA": {}, "GW-BL": {}, + "GW-BM": {}, "GW-BS": {}, "GW-CA": {}, "GW-GA": {}, "GW-L": {}, + "GW-N": {}, "GW-OI": {}, "GW-QU": {}, "GW-S": {}, "GW-TO": {}, + "GY-BA": {}, "GY-CU": {}, "GY-DE": {}, "GY-EB": {}, "GY-ES": {}, + "GY-MA": {}, "GY-PM": {}, "GY-PT": {}, "GY-UD": {}, "GY-UT": {}, + "HN-AT": {}, "HN-CH": {}, "HN-CL": {}, "HN-CM": {}, "HN-CP": {}, + "HN-CR": {}, "HN-EP": {}, "HN-FM": {}, "HN-GD": {}, "HN-IB": {}, + "HN-IN": {}, "HN-LE": {}, "HN-LP": {}, "HN-OC": {}, "HN-OL": {}, + "HN-SB": {}, "HN-VA": {}, "HN-YO": {}, "HR-01": {}, "HR-02": {}, + "HR-03": {}, "HR-04": {}, "HR-05": {}, "HR-06": {}, "HR-07": {}, + "HR-08": {}, "HR-09": {}, "HR-10": {}, "HR-11": {}, "HR-12": {}, + "HR-13": {}, "HR-14": {}, "HR-15": {}, "HR-16": {}, "HR-17": {}, + "HR-18": {}, "HR-19": {}, "HR-20": {}, "HR-21": {}, "HT-AR": {}, + "HT-CE": {}, "HT-GA": {}, "HT-ND": {}, "HT-NE": {}, "HT-NO": {}, "HT-NI": {}, + "HT-OU": {}, "HT-SD": {}, "HT-SE": {}, "HU-BA": {}, "HU-BC": {}, + "HU-BE": {}, "HU-BK": {}, "HU-BU": {}, "HU-BZ": {}, "HU-CS": {}, + "HU-DE": {}, "HU-DU": {}, "HU-EG": {}, "HU-ER": {}, "HU-FE": {}, + "HU-GS": {}, "HU-GY": {}, "HU-HB": {}, "HU-HE": {}, "HU-HV": {}, + "HU-JN": {}, "HU-KE": {}, "HU-KM": {}, "HU-KV": {}, "HU-MI": {}, + "HU-NK": {}, "HU-NO": {}, "HU-NY": {}, "HU-PE": {}, "HU-PS": {}, + "HU-SD": {}, "HU-SF": {}, "HU-SH": {}, "HU-SK": {}, "HU-SN": {}, + "HU-SO": {}, "HU-SS": {}, "HU-ST": {}, "HU-SZ": {}, "HU-TB": {}, + "HU-TO": {}, "HU-VA": {}, "HU-VE": {}, "HU-VM": {}, "HU-ZA": {}, + "HU-ZE": {}, "ID-AC": {}, "ID-BA": {}, "ID-BB": {}, "ID-BE": {}, + "ID-BT": {}, "ID-GO": {}, "ID-IJ": {}, "ID-JA": {}, "ID-JB": {}, + "ID-JI": {}, "ID-JK": {}, "ID-JT": {}, "ID-JW": {}, "ID-KA": {}, + "ID-KB": {}, "ID-KI": {}, "ID-KU": {}, "ID-KR": {}, "ID-KS": {}, + "ID-KT": {}, "ID-LA": {}, "ID-MA": {}, "ID-ML": {}, "ID-MU": {}, + "ID-NB": {}, "ID-NT": {}, "ID-NU": {}, "ID-PA": {}, "ID-PB": {}, + "ID-PE": {}, "ID-PP": {}, "ID-PS": {}, "ID-PT": {}, "ID-RI": {}, + "ID-SA": {}, "ID-SB": {}, "ID-SG": {}, "ID-SL": {}, "ID-SM": {}, + "ID-SN": {}, "ID-SR": {}, "ID-SS": {}, "ID-ST": {}, "ID-SU": {}, + "ID-YO": {}, "IE-C": {}, "IE-CE": {}, "IE-CN": {}, "IE-CO": {}, + "IE-CW": 
{}, "IE-D": {}, "IE-DL": {}, "IE-G": {}, "IE-KE": {}, + "IE-KK": {}, "IE-KY": {}, "IE-L": {}, "IE-LD": {}, "IE-LH": {}, + "IE-LK": {}, "IE-LM": {}, "IE-LS": {}, "IE-M": {}, "IE-MH": {}, + "IE-MN": {}, "IE-MO": {}, "IE-OY": {}, "IE-RN": {}, "IE-SO": {}, + "IE-TA": {}, "IE-U": {}, "IE-WD": {}, "IE-WH": {}, "IE-WW": {}, + "IE-WX": {}, "IL-D": {}, "IL-HA": {}, "IL-JM": {}, "IL-M": {}, + "IL-TA": {}, "IL-Z": {}, "IN-AN": {}, "IN-AP": {}, "IN-AR": {}, + "IN-AS": {}, "IN-BR": {}, "IN-CH": {}, "IN-CT": {}, "IN-DH": {}, + "IN-DL": {}, "IN-DN": {}, "IN-GA": {}, "IN-GJ": {}, "IN-HP": {}, + "IN-HR": {}, "IN-JH": {}, "IN-JK": {}, "IN-KA": {}, "IN-KL": {}, + "IN-LD": {}, "IN-MH": {}, "IN-ML": {}, "IN-MN": {}, "IN-MP": {}, + "IN-MZ": {}, "IN-NL": {}, "IN-TG": {}, "IN-OR": {}, "IN-PB": {}, "IN-PY": {}, + "IN-RJ": {}, "IN-SK": {}, "IN-TN": {}, "IN-TR": {}, "IN-UP": {}, + "IN-UT": {}, "IN-WB": {}, "IQ-AN": {}, "IQ-AR": {}, "IQ-BA": {}, + "IQ-BB": {}, "IQ-BG": {}, "IQ-DA": {}, "IQ-DI": {}, "IQ-DQ": {}, + "IQ-KA": {}, "IQ-KI": {}, "IQ-MA": {}, "IQ-MU": {}, "IQ-NA": {}, "IQ-NI": {}, + "IQ-QA": {}, "IQ-SD": {}, "IQ-SW": {}, "IQ-SU": {}, "IQ-TS": {}, "IQ-WA": {}, + "IR-00": {}, "IR-01": {}, "IR-02": {}, "IR-03": {}, "IR-04": {}, "IR-05": {}, + "IR-06": {}, "IR-07": {}, "IR-08": {}, "IR-09": {}, "IR-10": {}, "IR-11": {}, + "IR-12": {}, "IR-13": {}, "IR-14": {}, "IR-15": {}, "IR-16": {}, + "IR-17": {}, "IR-18": {}, "IR-19": {}, "IR-20": {}, "IR-21": {}, + "IR-22": {}, "IR-23": {}, "IR-24": {}, "IR-25": {}, "IR-26": {}, + "IR-27": {}, "IR-28": {}, "IR-29": {}, "IR-30": {}, "IR-31": {}, + "IS-0": {}, "IS-1": {}, "IS-2": {}, "IS-3": {}, "IS-4": {}, + "IS-5": {}, "IS-6": {}, "IS-7": {}, "IS-8": {}, "IT-21": {}, + "IT-23": {}, "IT-25": {}, "IT-32": {}, "IT-34": {}, "IT-36": {}, + "IT-42": {}, "IT-45": {}, "IT-52": {}, "IT-55": {}, "IT-57": {}, + "IT-62": {}, "IT-65": {}, "IT-67": {}, "IT-72": {}, "IT-75": {}, + "IT-77": {}, "IT-78": {}, "IT-82": {}, "IT-88": {}, "IT-AG": {}, + "IT-AL": {}, "IT-AN": {}, "IT-AO": {}, "IT-AP": {}, "IT-AQ": {}, + "IT-AR": {}, "IT-AT": {}, "IT-AV": {}, "IT-BA": {}, "IT-BG": {}, + "IT-BI": {}, "IT-BL": {}, "IT-BN": {}, "IT-BO": {}, "IT-BR": {}, + "IT-BS": {}, "IT-BT": {}, "IT-BZ": {}, "IT-CA": {}, "IT-CB": {}, + "IT-CE": {}, "IT-CH": {}, "IT-CI": {}, "IT-CL": {}, "IT-CN": {}, + "IT-CO": {}, "IT-CR": {}, "IT-CS": {}, "IT-CT": {}, "IT-CZ": {}, + "IT-EN": {}, "IT-FC": {}, "IT-FE": {}, "IT-FG": {}, "IT-FI": {}, + "IT-FM": {}, "IT-FR": {}, "IT-GE": {}, "IT-GO": {}, "IT-GR": {}, + "IT-IM": {}, "IT-IS": {}, "IT-KR": {}, "IT-LC": {}, "IT-LE": {}, + "IT-LI": {}, "IT-LO": {}, "IT-LT": {}, "IT-LU": {}, "IT-MB": {}, + "IT-MC": {}, "IT-ME": {}, "IT-MI": {}, "IT-MN": {}, "IT-MO": {}, + "IT-MS": {}, "IT-MT": {}, "IT-NA": {}, "IT-NO": {}, "IT-NU": {}, + "IT-OG": {}, "IT-OR": {}, "IT-OT": {}, "IT-PA": {}, "IT-PC": {}, + "IT-PD": {}, "IT-PE": {}, "IT-PG": {}, "IT-PI": {}, "IT-PN": {}, + "IT-PO": {}, "IT-PR": {}, "IT-PT": {}, "IT-PU": {}, "IT-PV": {}, + "IT-PZ": {}, "IT-RA": {}, "IT-RC": {}, "IT-RE": {}, "IT-RG": {}, + "IT-RI": {}, "IT-RM": {}, "IT-RN": {}, "IT-RO": {}, "IT-SA": {}, + "IT-SI": {}, "IT-SO": {}, "IT-SP": {}, "IT-SR": {}, "IT-SS": {}, + "IT-SV": {}, "IT-TA": {}, "IT-TE": {}, "IT-TN": {}, "IT-TO": {}, + "IT-TP": {}, "IT-TR": {}, "IT-TS": {}, "IT-TV": {}, "IT-UD": {}, + "IT-VA": {}, "IT-VB": {}, "IT-VC": {}, "IT-VE": {}, "IT-VI": {}, + "IT-VR": {}, "IT-VS": {}, "IT-VT": {}, "IT-VV": {}, "JM-01": {}, + "JM-02": {}, "JM-03": {}, "JM-04": {}, "JM-05": {}, "JM-06": {}, + "JM-07": {}, "JM-08": {}, 
"JM-09": {}, "JM-10": {}, "JM-11": {}, + "JM-12": {}, "JM-13": {}, "JM-14": {}, "JO-AJ": {}, "JO-AM": {}, + "JO-AQ": {}, "JO-AT": {}, "JO-AZ": {}, "JO-BA": {}, "JO-IR": {}, + "JO-JA": {}, "JO-KA": {}, "JO-MA": {}, "JO-MD": {}, "JO-MN": {}, + "JP-01": {}, "JP-02": {}, "JP-03": {}, "JP-04": {}, "JP-05": {}, + "JP-06": {}, "JP-07": {}, "JP-08": {}, "JP-09": {}, "JP-10": {}, + "JP-11": {}, "JP-12": {}, "JP-13": {}, "JP-14": {}, "JP-15": {}, + "JP-16": {}, "JP-17": {}, "JP-18": {}, "JP-19": {}, "JP-20": {}, + "JP-21": {}, "JP-22": {}, "JP-23": {}, "JP-24": {}, "JP-25": {}, + "JP-26": {}, "JP-27": {}, "JP-28": {}, "JP-29": {}, "JP-30": {}, + "JP-31": {}, "JP-32": {}, "JP-33": {}, "JP-34": {}, "JP-35": {}, + "JP-36": {}, "JP-37": {}, "JP-38": {}, "JP-39": {}, "JP-40": {}, + "JP-41": {}, "JP-42": {}, "JP-43": {}, "JP-44": {}, "JP-45": {}, + "JP-46": {}, "JP-47": {}, "KE-01": {}, "KE-02": {}, "KE-03": {}, + "KE-04": {}, "KE-05": {}, "KE-06": {}, "KE-07": {}, "KE-08": {}, + "KE-09": {}, "KE-10": {}, "KE-11": {}, "KE-12": {}, "KE-13": {}, + "KE-14": {}, "KE-15": {}, "KE-16": {}, "KE-17": {}, "KE-18": {}, + "KE-19": {}, "KE-20": {}, "KE-21": {}, "KE-22": {}, "KE-23": {}, + "KE-24": {}, "KE-25": {}, "KE-26": {}, "KE-27": {}, "KE-28": {}, + "KE-29": {}, "KE-30": {}, "KE-31": {}, "KE-32": {}, "KE-33": {}, + "KE-34": {}, "KE-35": {}, "KE-36": {}, "KE-37": {}, "KE-38": {}, + "KE-39": {}, "KE-40": {}, "KE-41": {}, "KE-42": {}, "KE-43": {}, + "KE-44": {}, "KE-45": {}, "KE-46": {}, "KE-47": {}, "KG-B": {}, + "KG-C": {}, "KG-GB": {}, "KG-GO": {}, "KG-J": {}, "KG-N": {}, "KG-O": {}, + "KG-T": {}, "KG-Y": {}, "KH-1": {}, "KH-10": {}, "KH-11": {}, + "KH-12": {}, "KH-13": {}, "KH-14": {}, "KH-15": {}, "KH-16": {}, + "KH-17": {}, "KH-18": {}, "KH-19": {}, "KH-2": {}, "KH-20": {}, + "KH-21": {}, "KH-22": {}, "KH-23": {}, "KH-24": {}, "KH-3": {}, + "KH-4": {}, "KH-5": {}, "KH-6": {}, "KH-7": {}, "KH-8": {}, + "KH-9": {}, "KI-G": {}, "KI-L": {}, "KI-P": {}, "KM-A": {}, + "KM-G": {}, "KM-M": {}, "KN-01": {}, "KN-02": {}, "KN-03": {}, + "KN-04": {}, "KN-05": {}, "KN-06": {}, "KN-07": {}, "KN-08": {}, + "KN-09": {}, "KN-10": {}, "KN-11": {}, "KN-12": {}, "KN-13": {}, + "KN-15": {}, "KN-K": {}, "KN-N": {}, "KP-01": {}, "KP-02": {}, + "KP-03": {}, "KP-04": {}, "KP-05": {}, "KP-06": {}, "KP-07": {}, + "KP-08": {}, "KP-09": {}, "KP-10": {}, "KP-13": {}, "KR-11": {}, + "KR-26": {}, "KR-27": {}, "KR-28": {}, "KR-29": {}, "KR-30": {}, + "KR-31": {}, "KR-41": {}, "KR-42": {}, "KR-43": {}, "KR-44": {}, + "KR-45": {}, "KR-46": {}, "KR-47": {}, "KR-48": {}, "KR-49": {}, + "KW-AH": {}, "KW-FA": {}, "KW-HA": {}, "KW-JA": {}, "KW-KU": {}, + "KW-MU": {}, "KZ-10": {}, "KZ-75": {}, "KZ-19": {}, "KZ-11": {}, + "KZ-15": {}, "KZ-71": {}, "KZ-23": {}, "KZ-27": {}, "KZ-47": {}, + "KZ-55": {}, "KZ-35": {}, "KZ-39": {}, "KZ-43": {}, "KZ-63": {}, + "KZ-79": {}, "KZ-59": {}, "KZ-61": {}, "KZ-62": {}, "KZ-31": {}, + "KZ-33": {}, "LA-AT": {}, "LA-BK": {}, "LA-BL": {}, + "LA-CH": {}, "LA-HO": {}, "LA-KH": {}, "LA-LM": {}, "LA-LP": {}, + "LA-OU": {}, "LA-PH": {}, "LA-SL": {}, "LA-SV": {}, "LA-VI": {}, + "LA-VT": {}, "LA-XA": {}, "LA-XE": {}, "LA-XI": {}, "LA-XS": {}, + "LB-AK": {}, "LB-AS": {}, "LB-BA": {}, "LB-BH": {}, "LB-BI": {}, + "LB-JA": {}, "LB-JL": {}, "LB-NA": {}, "LC-01": {}, "LC-02": {}, + "LC-03": {}, "LC-05": {}, "LC-06": {}, "LC-07": {}, "LC-08": {}, + "LC-10": {}, "LC-11": {}, "LI-01": {}, "LI-02": {}, + "LI-03": {}, "LI-04": {}, "LI-05": {}, "LI-06": {}, "LI-07": {}, + "LI-08": {}, "LI-09": {}, "LI-10": {}, "LI-11": {}, "LK-1": {}, + 
"LK-11": {}, "LK-12": {}, "LK-13": {}, "LK-2": {}, "LK-21": {}, + "LK-22": {}, "LK-23": {}, "LK-3": {}, "LK-31": {}, "LK-32": {}, + "LK-33": {}, "LK-4": {}, "LK-41": {}, "LK-42": {}, "LK-43": {}, + "LK-44": {}, "LK-45": {}, "LK-5": {}, "LK-51": {}, "LK-52": {}, + "LK-53": {}, "LK-6": {}, "LK-61": {}, "LK-62": {}, "LK-7": {}, + "LK-71": {}, "LK-72": {}, "LK-8": {}, "LK-81": {}, "LK-82": {}, + "LK-9": {}, "LK-91": {}, "LK-92": {}, "LR-BG": {}, "LR-BM": {}, + "LR-CM": {}, "LR-GB": {}, "LR-GG": {}, "LR-GK": {}, "LR-LO": {}, + "LR-MG": {}, "LR-MO": {}, "LR-MY": {}, "LR-NI": {}, "LR-RI": {}, + "LR-SI": {}, "LS-A": {}, "LS-B": {}, "LS-C": {}, "LS-D": {}, + "LS-E": {}, "LS-F": {}, "LS-G": {}, "LS-H": {}, "LS-J": {}, + "LS-K": {}, "LT-AL": {}, "LT-KL": {}, "LT-KU": {}, "LT-MR": {}, + "LT-PN": {}, "LT-SA": {}, "LT-TA": {}, "LT-TE": {}, "LT-UT": {}, + "LT-VL": {}, "LU-CA": {}, "LU-CL": {}, "LU-DI": {}, "LU-EC": {}, + "LU-ES": {}, "LU-GR": {}, "LU-LU": {}, "LU-ME": {}, "LU-RD": {}, + "LU-RM": {}, "LU-VD": {}, "LU-WI": {}, "LU-D": {}, "LU-G": {}, "LU-L": {}, + "LV-001": {}, "LV-111": {}, "LV-112": {}, "LV-113": {}, + "LV-002": {}, "LV-003": {}, "LV-004": {}, "LV-005": {}, "LV-006": {}, + "LV-007": {}, "LV-008": {}, "LV-009": {}, "LV-010": {}, "LV-011": {}, + "LV-012": {}, "LV-013": {}, "LV-014": {}, "LV-015": {}, "LV-016": {}, + "LV-017": {}, "LV-018": {}, "LV-019": {}, "LV-020": {}, "LV-021": {}, + "LV-022": {}, "LV-023": {}, "LV-024": {}, "LV-025": {}, "LV-026": {}, + "LV-027": {}, "LV-028": {}, "LV-029": {}, "LV-030": {}, "LV-031": {}, + "LV-032": {}, "LV-033": {}, "LV-034": {}, "LV-035": {}, "LV-036": {}, + "LV-037": {}, "LV-038": {}, "LV-039": {}, "LV-040": {}, "LV-041": {}, + "LV-042": {}, "LV-043": {}, "LV-044": {}, "LV-045": {}, "LV-046": {}, + "LV-047": {}, "LV-048": {}, "LV-049": {}, "LV-050": {}, "LV-051": {}, + "LV-052": {}, "LV-053": {}, "LV-054": {}, "LV-055": {}, "LV-056": {}, + "LV-057": {}, "LV-058": {}, "LV-059": {}, "LV-060": {}, "LV-061": {}, + "LV-062": {}, "LV-063": {}, "LV-064": {}, "LV-065": {}, "LV-066": {}, + "LV-067": {}, "LV-068": {}, "LV-069": {}, "LV-070": {}, "LV-071": {}, + "LV-072": {}, "LV-073": {}, "LV-074": {}, "LV-075": {}, "LV-076": {}, + "LV-077": {}, "LV-078": {}, "LV-079": {}, "LV-080": {}, "LV-081": {}, + "LV-082": {}, "LV-083": {}, "LV-084": {}, "LV-085": {}, "LV-086": {}, + "LV-087": {}, "LV-088": {}, "LV-089": {}, "LV-090": {}, "LV-091": {}, + "LV-092": {}, "LV-093": {}, "LV-094": {}, "LV-095": {}, "LV-096": {}, + "LV-097": {}, "LV-098": {}, "LV-099": {}, "LV-100": {}, "LV-101": {}, + "LV-102": {}, "LV-103": {}, "LV-104": {}, "LV-105": {}, "LV-106": {}, + "LV-107": {}, "LV-108": {}, "LV-109": {}, "LV-110": {}, "LV-DGV": {}, + "LV-JEL": {}, "LV-JKB": {}, "LV-JUR": {}, "LV-LPX": {}, "LV-REZ": {}, + "LV-RIX": {}, "LV-VEN": {}, "LV-VMR": {}, "LY-BA": {}, "LY-BU": {}, + "LY-DR": {}, "LY-GT": {}, "LY-JA": {}, "LY-JB": {}, "LY-JG": {}, + "LY-JI": {}, "LY-JU": {}, "LY-KF": {}, "LY-MB": {}, "LY-MI": {}, + "LY-MJ": {}, "LY-MQ": {}, "LY-NL": {}, "LY-NQ": {}, "LY-SB": {}, + "LY-SR": {}, "LY-TB": {}, "LY-WA": {}, "LY-WD": {}, "LY-WS": {}, + "LY-ZA": {}, "MA-01": {}, "MA-02": {}, "MA-03": {}, "MA-04": {}, + "MA-05": {}, "MA-06": {}, "MA-07": {}, "MA-08": {}, "MA-09": {}, + "MA-10": {}, "MA-11": {}, "MA-12": {}, "MA-13": {}, "MA-14": {}, + "MA-15": {}, "MA-16": {}, "MA-AGD": {}, "MA-AOU": {}, "MA-ASZ": {}, + "MA-AZI": {}, "MA-BEM": {}, "MA-BER": {}, "MA-BES": {}, "MA-BOD": {}, + "MA-BOM": {}, "MA-CAS": {}, "MA-CHE": {}, "MA-CHI": {}, "MA-CHT": {}, + "MA-ERR": {}, "MA-ESI": 
{}, "MA-ESM": {}, "MA-FAH": {}, "MA-FES": {}, + "MA-FIG": {}, "MA-GUE": {}, "MA-HAJ": {}, "MA-HAO": {}, "MA-HOC": {}, + "MA-IFR": {}, "MA-INE": {}, "MA-JDI": {}, "MA-JRA": {}, "MA-KEN": {}, + "MA-KES": {}, "MA-KHE": {}, "MA-KHN": {}, "MA-KHO": {}, "MA-LAA": {}, + "MA-LAR": {}, "MA-MED": {}, "MA-MEK": {}, "MA-MMD": {}, "MA-MMN": {}, + "MA-MOH": {}, "MA-MOU": {}, "MA-NAD": {}, "MA-NOU": {}, "MA-OUA": {}, + "MA-OUD": {}, "MA-OUJ": {}, "MA-RAB": {}, "MA-SAF": {}, "MA-SAL": {}, + "MA-SEF": {}, "MA-SET": {}, "MA-SIK": {}, "MA-SKH": {}, "MA-SYB": {}, + "MA-TAI": {}, "MA-TAO": {}, "MA-TAR": {}, "MA-TAT": {}, "MA-TAZ": {}, + "MA-TET": {}, "MA-TIZ": {}, "MA-TNG": {}, "MA-TNT": {}, "MA-ZAG": {}, + "MC-CL": {}, "MC-CO": {}, "MC-FO": {}, "MC-GA": {}, "MC-JE": {}, + "MC-LA": {}, "MC-MA": {}, "MC-MC": {}, "MC-MG": {}, "MC-MO": {}, + "MC-MU": {}, "MC-PH": {}, "MC-SD": {}, "MC-SO": {}, "MC-SP": {}, + "MC-SR": {}, "MC-VR": {}, "MD-AN": {}, "MD-BA": {}, "MD-BD": {}, + "MD-BR": {}, "MD-BS": {}, "MD-CA": {}, "MD-CL": {}, "MD-CM": {}, + "MD-CR": {}, "MD-CS": {}, "MD-CT": {}, "MD-CU": {}, "MD-DO": {}, + "MD-DR": {}, "MD-DU": {}, "MD-ED": {}, "MD-FA": {}, "MD-FL": {}, + "MD-GA": {}, "MD-GL": {}, "MD-HI": {}, "MD-IA": {}, "MD-LE": {}, + "MD-NI": {}, "MD-OC": {}, "MD-OR": {}, "MD-RE": {}, "MD-RI": {}, + "MD-SD": {}, "MD-SI": {}, "MD-SN": {}, "MD-SO": {}, "MD-ST": {}, + "MD-SV": {}, "MD-TA": {}, "MD-TE": {}, "MD-UN": {}, "ME-01": {}, + "ME-02": {}, "ME-03": {}, "ME-04": {}, "ME-05": {}, "ME-06": {}, + "ME-07": {}, "ME-08": {}, "ME-09": {}, "ME-10": {}, "ME-11": {}, + "ME-12": {}, "ME-13": {}, "ME-14": {}, "ME-15": {}, "ME-16": {}, + "ME-17": {}, "ME-18": {}, "ME-19": {}, "ME-20": {}, "ME-21": {}, "ME-24": {}, + "MG-A": {}, "MG-D": {}, "MG-F": {}, "MG-M": {}, "MG-T": {}, + "MG-U": {}, "MH-ALK": {}, "MH-ALL": {}, "MH-ARN": {}, "MH-AUR": {}, + "MH-EBO": {}, "MH-ENI": {}, "MH-JAB": {}, "MH-JAL": {}, "MH-KIL": {}, + "MH-KWA": {}, "MH-L": {}, "MH-LAE": {}, "MH-LIB": {}, "MH-LIK": {}, + "MH-MAJ": {}, "MH-MAL": {}, "MH-MEJ": {}, "MH-MIL": {}, "MH-NMK": {}, + "MH-NMU": {}, "MH-RON": {}, "MH-T": {}, "MH-UJA": {}, "MH-UTI": {}, + "MH-WTJ": {}, "MH-WTN": {}, "MK-101": {}, "MK-102": {}, "MK-103": {}, + "MK-104": {}, "MK-105": {}, + "MK-106": {}, "MK-107": {}, "MK-108": {}, "MK-109": {}, "MK-201": {}, + "MK-202": {}, "MK-205": {}, "MK-206": {}, "MK-207": {}, "MK-208": {}, + "MK-209": {}, "MK-210": {}, "MK-211": {}, "MK-301": {}, "MK-303": {}, + "MK-307": {}, "MK-308": {}, "MK-310": {}, "MK-311": {}, "MK-312": {}, + "MK-401": {}, "MK-402": {}, "MK-403": {}, "MK-404": {}, "MK-405": {}, + "MK-406": {}, "MK-408": {}, "MK-409": {}, "MK-410": {}, "MK-501": {}, + "MK-502": {}, "MK-503": {}, "MK-505": {}, "MK-506": {}, "MK-507": {}, + "MK-508": {}, "MK-509": {}, "MK-601": {}, "MK-602": {}, "MK-604": {}, + "MK-605": {}, "MK-606": {}, "MK-607": {}, "MK-608": {}, "MK-609": {}, + "MK-701": {}, "MK-702": {}, "MK-703": {}, "MK-704": {}, "MK-705": {}, + "MK-803": {}, "MK-804": {}, "MK-806": {}, "MK-807": {}, "MK-809": {}, + "MK-810": {}, "MK-811": {}, "MK-812": {}, "MK-813": {}, "MK-814": {}, + "MK-816": {}, "ML-1": {}, "ML-2": {}, "ML-3": {}, "ML-4": {}, + "ML-5": {}, "ML-6": {}, "ML-7": {}, "ML-8": {}, "ML-BKO": {}, + "MM-01": {}, "MM-02": {}, "MM-03": {}, "MM-04": {}, "MM-05": {}, + "MM-06": {}, "MM-07": {}, "MM-11": {}, "MM-12": {}, "MM-13": {}, + "MM-14": {}, "MM-15": {}, "MM-16": {}, "MM-17": {}, "MM-18": {}, "MN-035": {}, + "MN-037": {}, "MN-039": {}, "MN-041": {}, "MN-043": {}, "MN-046": {}, + "MN-047": {}, "MN-049": {}, "MN-051": {}, 
"MN-053": {}, "MN-055": {}, + "MN-057": {}, "MN-059": {}, "MN-061": {}, "MN-063": {}, "MN-064": {}, + "MN-065": {}, "MN-067": {}, "MN-069": {}, "MN-071": {}, "MN-073": {}, + "MN-1": {}, "MR-01": {}, "MR-02": {}, "MR-03": {}, "MR-04": {}, + "MR-05": {}, "MR-06": {}, "MR-07": {}, "MR-08": {}, "MR-09": {}, + "MR-10": {}, "MR-11": {}, "MR-12": {}, "MR-13": {}, "MR-NKC": {}, "MT-01": {}, + "MT-02": {}, "MT-03": {}, "MT-04": {}, "MT-05": {}, "MT-06": {}, + "MT-07": {}, "MT-08": {}, "MT-09": {}, "MT-10": {}, "MT-11": {}, + "MT-12": {}, "MT-13": {}, "MT-14": {}, "MT-15": {}, "MT-16": {}, + "MT-17": {}, "MT-18": {}, "MT-19": {}, "MT-20": {}, "MT-21": {}, + "MT-22": {}, "MT-23": {}, "MT-24": {}, "MT-25": {}, "MT-26": {}, + "MT-27": {}, "MT-28": {}, "MT-29": {}, "MT-30": {}, "MT-31": {}, + "MT-32": {}, "MT-33": {}, "MT-34": {}, "MT-35": {}, "MT-36": {}, + "MT-37": {}, "MT-38": {}, "MT-39": {}, "MT-40": {}, "MT-41": {}, + "MT-42": {}, "MT-43": {}, "MT-44": {}, "MT-45": {}, "MT-46": {}, + "MT-47": {}, "MT-48": {}, "MT-49": {}, "MT-50": {}, "MT-51": {}, + "MT-52": {}, "MT-53": {}, "MT-54": {}, "MT-55": {}, "MT-56": {}, + "MT-57": {}, "MT-58": {}, "MT-59": {}, "MT-60": {}, "MT-61": {}, + "MT-62": {}, "MT-63": {}, "MT-64": {}, "MT-65": {}, "MT-66": {}, + "MT-67": {}, "MT-68": {}, "MU-AG": {}, "MU-BL": {}, "MU-BR": {}, + "MU-CC": {}, "MU-CU": {}, "MU-FL": {}, "MU-GP": {}, "MU-MO": {}, + "MU-PA": {}, "MU-PL": {}, "MU-PU": {}, "MU-PW": {}, "MU-QB": {}, + "MU-RO": {}, "MU-RP": {}, "MU-RR": {}, "MU-SA": {}, "MU-VP": {}, "MV-00": {}, + "MV-01": {}, "MV-02": {}, "MV-03": {}, "MV-04": {}, "MV-05": {}, + "MV-07": {}, "MV-08": {}, "MV-12": {}, "MV-13": {}, "MV-14": {}, + "MV-17": {}, "MV-20": {}, "MV-23": {}, "MV-24": {}, "MV-25": {}, + "MV-26": {}, "MV-27": {}, "MV-28": {}, "MV-29": {}, "MV-CE": {}, + "MV-MLE": {}, "MV-NC": {}, "MV-NO": {}, "MV-SC": {}, "MV-SU": {}, + "MV-UN": {}, "MV-US": {}, "MW-BA": {}, "MW-BL": {}, "MW-C": {}, + "MW-CK": {}, "MW-CR": {}, "MW-CT": {}, "MW-DE": {}, "MW-DO": {}, + "MW-KR": {}, "MW-KS": {}, "MW-LI": {}, "MW-LK": {}, "MW-MC": {}, + "MW-MG": {}, "MW-MH": {}, "MW-MU": {}, "MW-MW": {}, "MW-MZ": {}, + "MW-N": {}, "MW-NB": {}, "MW-NE": {}, "MW-NI": {}, "MW-NK": {}, + "MW-NS": {}, "MW-NU": {}, "MW-PH": {}, "MW-RU": {}, "MW-S": {}, + "MW-SA": {}, "MW-TH": {}, "MW-ZO": {}, "MX-AGU": {}, "MX-BCN": {}, + "MX-BCS": {}, "MX-CAM": {}, "MX-CHH": {}, "MX-CHP": {}, "MX-COA": {}, + "MX-COL": {}, "MX-CMX": {}, "MX-DIF": {}, "MX-DUR": {}, "MX-GRO": {}, "MX-GUA": {}, + "MX-HID": {}, "MX-JAL": {}, "MX-MEX": {}, "MX-MIC": {}, "MX-MOR": {}, + "MX-NAY": {}, "MX-NLE": {}, "MX-OAX": {}, "MX-PUE": {}, "MX-QUE": {}, + "MX-ROO": {}, "MX-SIN": {}, "MX-SLP": {}, "MX-SON": {}, "MX-TAB": {}, + "MX-TAM": {}, "MX-TLA": {}, "MX-VER": {}, "MX-YUC": {}, "MX-ZAC": {}, + "MY-01": {}, "MY-02": {}, "MY-03": {}, "MY-04": {}, "MY-05": {}, + "MY-06": {}, "MY-07": {}, "MY-08": {}, "MY-09": {}, "MY-10": {}, + "MY-11": {}, "MY-12": {}, "MY-13": {}, "MY-14": {}, "MY-15": {}, + "MY-16": {}, "MZ-A": {}, "MZ-B": {}, "MZ-G": {}, "MZ-I": {}, + "MZ-L": {}, "MZ-MPM": {}, "MZ-N": {}, "MZ-P": {}, "MZ-Q": {}, + "MZ-S": {}, "MZ-T": {}, "NA-CA": {}, "NA-ER": {}, "NA-HA": {}, + "NA-KA": {}, "NA-KE": {}, "NA-KH": {}, "NA-KU": {}, "NA-KW": {}, "NA-OD": {}, "NA-OH": {}, + "NA-OK": {}, "NA-ON": {}, "NA-OS": {}, "NA-OT": {}, "NA-OW": {}, + "NE-1": {}, "NE-2": {}, "NE-3": {}, "NE-4": {}, "NE-5": {}, + "NE-6": {}, "NE-7": {}, "NE-8": {}, "NG-AB": {}, "NG-AD": {}, + "NG-AK": {}, "NG-AN": {}, "NG-BA": {}, "NG-BE": {}, "NG-BO": {}, + "NG-BY": {}, 
"NG-CR": {}, "NG-DE": {}, "NG-EB": {}, "NG-ED": {}, + "NG-EK": {}, "NG-EN": {}, "NG-FC": {}, "NG-GO": {}, "NG-IM": {}, + "NG-JI": {}, "NG-KD": {}, "NG-KE": {}, "NG-KN": {}, "NG-KO": {}, + "NG-KT": {}, "NG-KW": {}, "NG-LA": {}, "NG-NA": {}, "NG-NI": {}, + "NG-OG": {}, "NG-ON": {}, "NG-OS": {}, "NG-OY": {}, "NG-PL": {}, + "NG-RI": {}, "NG-SO": {}, "NG-TA": {}, "NG-YO": {}, "NG-ZA": {}, + "NI-AN": {}, "NI-AS": {}, "NI-BO": {}, "NI-CA": {}, "NI-CI": {}, + "NI-CO": {}, "NI-ES": {}, "NI-GR": {}, "NI-JI": {}, "NI-LE": {}, + "NI-MD": {}, "NI-MN": {}, "NI-MS": {}, "NI-MT": {}, "NI-NS": {}, + "NI-RI": {}, "NI-SJ": {}, "NL-AW": {}, "NL-BQ1": {}, "NL-BQ2": {}, + "NL-BQ3": {}, "NL-CW": {}, "NL-DR": {}, "NL-FL": {}, "NL-FR": {}, + "NL-GE": {}, "NL-GR": {}, "NL-LI": {}, "NL-NB": {}, "NL-NH": {}, + "NL-OV": {}, "NL-SX": {}, "NL-UT": {}, "NL-ZE": {}, "NL-ZH": {}, + "NO-03": {}, "NO-11": {}, "NO-15": {}, "NO-16": {}, "NO-17": {}, + "NO-18": {}, "NO-21": {}, "NO-30": {}, "NO-34": {}, "NO-38": {}, + "NO-42": {}, "NO-46": {}, "NO-50": {}, "NO-54": {}, + "NO-22": {}, "NP-1": {}, "NP-2": {}, "NP-3": {}, "NP-4": {}, + "NP-5": {}, "NP-BA": {}, "NP-BH": {}, "NP-DH": {}, "NP-GA": {}, + "NP-JA": {}, "NP-KA": {}, "NP-KO": {}, "NP-LU": {}, "NP-MA": {}, + "NP-ME": {}, "NP-NA": {}, "NP-RA": {}, "NP-SA": {}, "NP-SE": {}, + "NR-01": {}, "NR-02": {}, "NR-03": {}, "NR-04": {}, "NR-05": {}, + "NR-06": {}, "NR-07": {}, "NR-08": {}, "NR-09": {}, "NR-10": {}, + "NR-11": {}, "NR-12": {}, "NR-13": {}, "NR-14": {}, "NZ-AUK": {}, + "NZ-BOP": {}, "NZ-CAN": {}, "NZ-CIT": {}, "NZ-GIS": {}, "NZ-HKB": {}, + "NZ-MBH": {}, "NZ-MWT": {}, "NZ-N": {}, "NZ-NSN": {}, "NZ-NTL": {}, + "NZ-OTA": {}, "NZ-S": {}, "NZ-STL": {}, "NZ-TAS": {}, "NZ-TKI": {}, + "NZ-WGN": {}, "NZ-WKO": {}, "NZ-WTC": {}, "OM-BA": {}, "OM-BS": {}, "OM-BU": {}, "OM-BJ": {}, + "OM-DA": {}, "OM-MA": {}, "OM-MU": {}, "OM-SH": {}, "OM-SJ": {}, "OM-SS": {}, "OM-WU": {}, + "OM-ZA": {}, "OM-ZU": {}, "PA-1": {}, "PA-2": {}, "PA-3": {}, + "PA-4": {}, "PA-5": {}, "PA-6": {}, "PA-7": {}, "PA-8": {}, + "PA-9": {}, "PA-EM": {}, "PA-KY": {}, "PA-NB": {}, "PE-AMA": {}, + "PE-ANC": {}, "PE-APU": {}, "PE-ARE": {}, "PE-AYA": {}, "PE-CAJ": {}, + "PE-CAL": {}, "PE-CUS": {}, "PE-HUC": {}, "PE-HUV": {}, "PE-ICA": {}, + "PE-JUN": {}, "PE-LAL": {}, "PE-LAM": {}, "PE-LIM": {}, "PE-LMA": {}, + "PE-LOR": {}, "PE-MDD": {}, "PE-MOQ": {}, "PE-PAS": {}, "PE-PIU": {}, + "PE-PUN": {}, "PE-SAM": {}, "PE-TAC": {}, "PE-TUM": {}, "PE-UCA": {}, + "PG-CPK": {}, "PG-CPM": {}, "PG-EBR": {}, "PG-EHG": {}, "PG-EPW": {}, + "PG-ESW": {}, "PG-GPK": {}, "PG-MBA": {}, "PG-MPL": {}, "PG-MPM": {}, + "PG-MRL": {}, "PG-NCD": {}, "PG-NIK": {}, "PG-NPP": {}, "PG-NSB": {}, + "PG-SAN": {}, "PG-SHM": {}, "PG-WBK": {}, "PG-WHM": {}, "PG-WPD": {}, + "PH-00": {}, "PH-01": {}, "PH-02": {}, "PH-03": {}, "PH-05": {}, + "PH-06": {}, "PH-07": {}, "PH-08": {}, "PH-09": {}, "PH-10": {}, + "PH-11": {}, "PH-12": {}, "PH-13": {}, "PH-14": {}, "PH-15": {}, + "PH-40": {}, "PH-41": {}, "PH-ABR": {}, "PH-AGN": {}, "PH-AGS": {}, + "PH-AKL": {}, "PH-ALB": {}, "PH-ANT": {}, "PH-APA": {}, "PH-AUR": {}, + "PH-BAN": {}, "PH-BAS": {}, "PH-BEN": {}, "PH-BIL": {}, "PH-BOH": {}, + "PH-BTG": {}, "PH-BTN": {}, "PH-BUK": {}, "PH-BUL": {}, "PH-CAG": {}, + "PH-CAM": {}, "PH-CAN": {}, "PH-CAP": {}, "PH-CAS": {}, "PH-CAT": {}, + "PH-CAV": {}, "PH-CEB": {}, "PH-COM": {}, "PH-DAO": {}, "PH-DAS": {}, + "PH-DAV": {}, "PH-DIN": {}, "PH-EAS": {}, "PH-GUI": {}, "PH-IFU": {}, + "PH-ILI": {}, "PH-ILN": {}, "PH-ILS": {}, "PH-ISA": {}, "PH-KAL": {}, + "PH-LAG": {}, "PH-LAN": 
{}, "PH-LAS": {}, "PH-LEY": {}, "PH-LUN": {}, + "PH-MAD": {}, "PH-MAG": {}, "PH-MAS": {}, "PH-MDC": {}, "PH-MDR": {}, + "PH-MOU": {}, "PH-MSC": {}, "PH-MSR": {}, "PH-NCO": {}, "PH-NEC": {}, + "PH-NER": {}, "PH-NSA": {}, "PH-NUE": {}, "PH-NUV": {}, "PH-PAM": {}, + "PH-PAN": {}, "PH-PLW": {}, "PH-QUE": {}, "PH-QUI": {}, "PH-RIZ": {}, + "PH-ROM": {}, "PH-SAR": {}, "PH-SCO": {}, "PH-SIG": {}, "PH-SLE": {}, + "PH-SLU": {}, "PH-SOR": {}, "PH-SUK": {}, "PH-SUN": {}, "PH-SUR": {}, + "PH-TAR": {}, "PH-TAW": {}, "PH-WSA": {}, "PH-ZAN": {}, "PH-ZAS": {}, + "PH-ZMB": {}, "PH-ZSI": {}, "PK-BA": {}, "PK-GB": {}, "PK-IS": {}, + "PK-JK": {}, "PK-KP": {}, "PK-PB": {}, "PK-SD": {}, "PK-TA": {}, + "PL-02": {}, "PL-04": {}, "PL-06": {}, "PL-08": {}, "PL-10": {}, + "PL-12": {}, "PL-14": {}, "PL-16": {}, "PL-18": {}, "PL-20": {}, + "PL-22": {}, "PL-24": {}, "PL-26": {}, "PL-28": {}, "PL-30": {}, "PL-32": {}, + "PS-BTH": {}, "PS-DEB": {}, "PS-GZA": {}, "PS-HBN": {}, + "PS-JEM": {}, "PS-JEN": {}, "PS-JRH": {}, "PS-KYS": {}, "PS-NBS": {}, + "PS-NGZ": {}, "PS-QQA": {}, "PS-RBH": {}, "PS-RFH": {}, "PS-SLT": {}, + "PS-TBS": {}, "PS-TKM": {}, "PT-01": {}, "PT-02": {}, "PT-03": {}, + "PT-04": {}, "PT-05": {}, "PT-06": {}, "PT-07": {}, "PT-08": {}, + "PT-09": {}, "PT-10": {}, "PT-11": {}, "PT-12": {}, "PT-13": {}, + "PT-14": {}, "PT-15": {}, "PT-16": {}, "PT-17": {}, "PT-18": {}, + "PT-20": {}, "PT-30": {}, "PW-002": {}, "PW-004": {}, "PW-010": {}, + "PW-050": {}, "PW-100": {}, "PW-150": {}, "PW-212": {}, "PW-214": {}, + "PW-218": {}, "PW-222": {}, "PW-224": {}, "PW-226": {}, "PW-227": {}, + "PW-228": {}, "PW-350": {}, "PW-370": {}, "PY-1": {}, "PY-10": {}, + "PY-11": {}, "PY-12": {}, "PY-13": {}, "PY-14": {}, "PY-15": {}, + "PY-16": {}, "PY-19": {}, "PY-2": {}, "PY-3": {}, "PY-4": {}, + "PY-5": {}, "PY-6": {}, "PY-7": {}, "PY-8": {}, "PY-9": {}, + "PY-ASU": {}, "QA-DA": {}, "QA-KH": {}, "QA-MS": {}, "QA-RA": {}, + "QA-US": {}, "QA-WA": {}, "QA-ZA": {}, "RO-AB": {}, "RO-AG": {}, + "RO-AR": {}, "RO-B": {}, "RO-BC": {}, "RO-BH": {}, "RO-BN": {}, + "RO-BR": {}, "RO-BT": {}, "RO-BV": {}, "RO-BZ": {}, "RO-CJ": {}, + "RO-CL": {}, "RO-CS": {}, "RO-CT": {}, "RO-CV": {}, "RO-DB": {}, + "RO-DJ": {}, "RO-GJ": {}, "RO-GL": {}, "RO-GR": {}, "RO-HD": {}, + "RO-HR": {}, "RO-IF": {}, "RO-IL": {}, "RO-IS": {}, "RO-MH": {}, + "RO-MM": {}, "RO-MS": {}, "RO-NT": {}, "RO-OT": {}, "RO-PH": {}, + "RO-SB": {}, "RO-SJ": {}, "RO-SM": {}, "RO-SV": {}, "RO-TL": {}, + "RO-TM": {}, "RO-TR": {}, "RO-VL": {}, "RO-VN": {}, "RO-VS": {}, + "RS-00": {}, "RS-01": {}, "RS-02": {}, "RS-03": {}, "RS-04": {}, + "RS-05": {}, "RS-06": {}, "RS-07": {}, "RS-08": {}, "RS-09": {}, + "RS-10": {}, "RS-11": {}, "RS-12": {}, "RS-13": {}, "RS-14": {}, + "RS-15": {}, "RS-16": {}, "RS-17": {}, "RS-18": {}, "RS-19": {}, + "RS-20": {}, "RS-21": {}, "RS-22": {}, "RS-23": {}, "RS-24": {}, + "RS-25": {}, "RS-26": {}, "RS-27": {}, "RS-28": {}, "RS-29": {}, + "RS-KM": {}, "RS-VO": {}, "RU-AD": {}, "RU-AL": {}, "RU-ALT": {}, + "RU-AMU": {}, "RU-ARK": {}, "RU-AST": {}, "RU-BA": {}, "RU-BEL": {}, + "RU-BRY": {}, "RU-BU": {}, "RU-CE": {}, "RU-CHE": {}, "RU-CHU": {}, + "RU-CU": {}, "RU-DA": {}, "RU-IN": {}, "RU-IRK": {}, "RU-IVA": {}, + "RU-KAM": {}, "RU-KB": {}, "RU-KC": {}, "RU-KDA": {}, "RU-KEM": {}, + "RU-KGD": {}, "RU-KGN": {}, "RU-KHA": {}, "RU-KHM": {}, "RU-KIR": {}, + "RU-KK": {}, "RU-KL": {}, "RU-KLU": {}, "RU-KO": {}, "RU-KOS": {}, + "RU-KR": {}, "RU-KRS": {}, "RU-KYA": {}, "RU-LEN": {}, "RU-LIP": {}, + "RU-MAG": {}, "RU-ME": {}, "RU-MO": {}, "RU-MOS": {}, "RU-MOW": {}, + 
"RU-MUR": {}, "RU-NEN": {}, "RU-NGR": {}, "RU-NIZ": {}, "RU-NVS": {}, + "RU-OMS": {}, "RU-ORE": {}, "RU-ORL": {}, "RU-PER": {}, "RU-PNZ": {}, + "RU-PRI": {}, "RU-PSK": {}, "RU-ROS": {}, "RU-RYA": {}, "RU-SA": {}, + "RU-SAK": {}, "RU-SAM": {}, "RU-SAR": {}, "RU-SE": {}, "RU-SMO": {}, + "RU-SPE": {}, "RU-STA": {}, "RU-SVE": {}, "RU-TA": {}, "RU-TAM": {}, + "RU-TOM": {}, "RU-TUL": {}, "RU-TVE": {}, "RU-TY": {}, "RU-TYU": {}, + "RU-UD": {}, "RU-ULY": {}, "RU-VGG": {}, "RU-VLA": {}, "RU-VLG": {}, + "RU-VOR": {}, "RU-YAN": {}, "RU-YAR": {}, "RU-YEV": {}, "RU-ZAB": {}, + "RW-01": {}, "RW-02": {}, "RW-03": {}, "RW-04": {}, "RW-05": {}, + "SA-01": {}, "SA-02": {}, "SA-03": {}, "SA-04": {}, "SA-05": {}, + "SA-06": {}, "SA-07": {}, "SA-08": {}, "SA-09": {}, "SA-10": {}, + "SA-11": {}, "SA-12": {}, "SA-14": {}, "SB-CE": {}, "SB-CH": {}, + "SB-CT": {}, "SB-GU": {}, "SB-IS": {}, "SB-MK": {}, "SB-ML": {}, + "SB-RB": {}, "SB-TE": {}, "SB-WE": {}, "SC-01": {}, "SC-02": {}, + "SC-03": {}, "SC-04": {}, "SC-05": {}, "SC-06": {}, "SC-07": {}, + "SC-08": {}, "SC-09": {}, "SC-10": {}, "SC-11": {}, "SC-12": {}, + "SC-13": {}, "SC-14": {}, "SC-15": {}, "SC-16": {}, "SC-17": {}, + "SC-18": {}, "SC-19": {}, "SC-20": {}, "SC-21": {}, "SC-22": {}, + "SC-23": {}, "SC-24": {}, "SC-25": {}, "SD-DC": {}, "SD-DE": {}, + "SD-DN": {}, "SD-DS": {}, "SD-DW": {}, "SD-GD": {}, "SD-GK": {}, "SD-GZ": {}, + "SD-KA": {}, "SD-KH": {}, "SD-KN": {}, "SD-KS": {}, "SD-NB": {}, + "SD-NO": {}, "SD-NR": {}, "SD-NW": {}, "SD-RS": {}, "SD-SI": {}, + "SE-AB": {}, "SE-AC": {}, "SE-BD": {}, "SE-C": {}, "SE-D": {}, + "SE-E": {}, "SE-F": {}, "SE-G": {}, "SE-H": {}, "SE-I": {}, + "SE-K": {}, "SE-M": {}, "SE-N": {}, "SE-O": {}, "SE-S": {}, + "SE-T": {}, "SE-U": {}, "SE-W": {}, "SE-X": {}, "SE-Y": {}, + "SE-Z": {}, "SG-01": {}, "SG-02": {}, "SG-03": {}, "SG-04": {}, + "SG-05": {}, "SH-AC": {}, "SH-HL": {}, "SH-TA": {}, "SI-001": {}, + "SI-002": {}, "SI-003": {}, "SI-004": {}, "SI-005": {}, "SI-006": {}, + "SI-007": {}, "SI-008": {}, "SI-009": {}, "SI-010": {}, "SI-011": {}, + "SI-012": {}, "SI-013": {}, "SI-014": {}, "SI-015": {}, "SI-016": {}, + "SI-017": {}, "SI-018": {}, "SI-019": {}, "SI-020": {}, "SI-021": {}, + "SI-022": {}, "SI-023": {}, "SI-024": {}, "SI-025": {}, "SI-026": {}, + "SI-027": {}, "SI-028": {}, "SI-029": {}, "SI-030": {}, "SI-031": {}, + "SI-032": {}, "SI-033": {}, "SI-034": {}, "SI-035": {}, "SI-036": {}, + "SI-037": {}, "SI-038": {}, "SI-039": {}, "SI-040": {}, "SI-041": {}, + "SI-042": {}, "SI-043": {}, "SI-044": {}, "SI-045": {}, "SI-046": {}, + "SI-047": {}, "SI-048": {}, "SI-049": {}, "SI-050": {}, "SI-051": {}, + "SI-052": {}, "SI-053": {}, "SI-054": {}, "SI-055": {}, "SI-056": {}, + "SI-057": {}, "SI-058": {}, "SI-059": {}, "SI-060": {}, "SI-061": {}, + "SI-062": {}, "SI-063": {}, "SI-064": {}, "SI-065": {}, "SI-066": {}, + "SI-067": {}, "SI-068": {}, "SI-069": {}, "SI-070": {}, "SI-071": {}, + "SI-072": {}, "SI-073": {}, "SI-074": {}, "SI-075": {}, "SI-076": {}, + "SI-077": {}, "SI-078": {}, "SI-079": {}, "SI-080": {}, "SI-081": {}, + "SI-082": {}, "SI-083": {}, "SI-084": {}, "SI-085": {}, "SI-086": {}, + "SI-087": {}, "SI-088": {}, "SI-089": {}, "SI-090": {}, "SI-091": {}, + "SI-092": {}, "SI-093": {}, "SI-094": {}, "SI-095": {}, "SI-096": {}, + "SI-097": {}, "SI-098": {}, "SI-099": {}, "SI-100": {}, "SI-101": {}, + "SI-102": {}, "SI-103": {}, "SI-104": {}, "SI-105": {}, "SI-106": {}, + "SI-107": {}, "SI-108": {}, "SI-109": {}, "SI-110": {}, "SI-111": {}, + "SI-112": {}, "SI-113": {}, "SI-114": {}, "SI-115": {}, 
"SI-116": {}, + "SI-117": {}, "SI-118": {}, "SI-119": {}, "SI-120": {}, "SI-121": {}, + "SI-122": {}, "SI-123": {}, "SI-124": {}, "SI-125": {}, "SI-126": {}, + "SI-127": {}, "SI-128": {}, "SI-129": {}, "SI-130": {}, "SI-131": {}, + "SI-132": {}, "SI-133": {}, "SI-134": {}, "SI-135": {}, "SI-136": {}, + "SI-137": {}, "SI-138": {}, "SI-139": {}, "SI-140": {}, "SI-141": {}, + "SI-142": {}, "SI-143": {}, "SI-144": {}, "SI-146": {}, "SI-147": {}, + "SI-148": {}, "SI-149": {}, "SI-150": {}, "SI-151": {}, "SI-152": {}, + "SI-153": {}, "SI-154": {}, "SI-155": {}, "SI-156": {}, "SI-157": {}, + "SI-158": {}, "SI-159": {}, "SI-160": {}, "SI-161": {}, "SI-162": {}, + "SI-163": {}, "SI-164": {}, "SI-165": {}, "SI-166": {}, "SI-167": {}, + "SI-168": {}, "SI-169": {}, "SI-170": {}, "SI-171": {}, "SI-172": {}, + "SI-173": {}, "SI-174": {}, "SI-175": {}, "SI-176": {}, "SI-177": {}, + "SI-178": {}, "SI-179": {}, "SI-180": {}, "SI-181": {}, "SI-182": {}, + "SI-183": {}, "SI-184": {}, "SI-185": {}, "SI-186": {}, "SI-187": {}, + "SI-188": {}, "SI-189": {}, "SI-190": {}, "SI-191": {}, "SI-192": {}, + "SI-193": {}, "SI-194": {}, "SI-195": {}, "SI-196": {}, "SI-197": {}, + "SI-198": {}, "SI-199": {}, "SI-200": {}, "SI-201": {}, "SI-202": {}, + "SI-203": {}, "SI-204": {}, "SI-205": {}, "SI-206": {}, "SI-207": {}, + "SI-208": {}, "SI-209": {}, "SI-210": {}, "SI-211": {}, "SI-212": {}, "SI-213": {}, "SK-BC": {}, + "SK-BL": {}, "SK-KI": {}, "SK-NI": {}, "SK-PV": {}, "SK-TA": {}, + "SK-TC": {}, "SK-ZI": {}, "SL-E": {}, "SL-N": {}, "SL-S": {}, + "SL-W": {}, "SM-01": {}, "SM-02": {}, "SM-03": {}, "SM-04": {}, + "SM-05": {}, "SM-06": {}, "SM-07": {}, "SM-08": {}, "SM-09": {}, + "SN-DB": {}, "SN-DK": {}, "SN-FK": {}, "SN-KA": {}, "SN-KD": {}, + "SN-KE": {}, "SN-KL": {}, "SN-LG": {}, "SN-MT": {}, "SN-SE": {}, + "SN-SL": {}, "SN-TC": {}, "SN-TH": {}, "SN-ZG": {}, "SO-AW": {}, + "SO-BK": {}, "SO-BN": {}, "SO-BR": {}, "SO-BY": {}, "SO-GA": {}, + "SO-GE": {}, "SO-HI": {}, "SO-JD": {}, "SO-JH": {}, "SO-MU": {}, + "SO-NU": {}, "SO-SA": {}, "SO-SD": {}, "SO-SH": {}, "SO-SO": {}, + "SO-TO": {}, "SO-WO": {}, "SR-BR": {}, "SR-CM": {}, "SR-CR": {}, + "SR-MA": {}, "SR-NI": {}, "SR-PM": {}, "SR-PR": {}, "SR-SA": {}, + "SR-SI": {}, "SR-WA": {}, "SS-BN": {}, "SS-BW": {}, "SS-EC": {}, + "SS-EE8": {}, "SS-EE": {}, "SS-EW": {}, "SS-JG": {}, "SS-LK": {}, "SS-NU": {}, + "SS-UY": {}, "SS-WR": {}, "ST-01": {}, "ST-P": {}, "ST-S": {}, "SV-AH": {}, + "SV-CA": {}, "SV-CH": {}, "SV-CU": {}, "SV-LI": {}, "SV-MO": {}, + "SV-PA": {}, "SV-SA": {}, "SV-SM": {}, "SV-SO": {}, "SV-SS": {}, + "SV-SV": {}, "SV-UN": {}, "SV-US": {}, "SY-DI": {}, "SY-DR": {}, + "SY-DY": {}, "SY-HA": {}, "SY-HI": {}, "SY-HL": {}, "SY-HM": {}, + "SY-ID": {}, "SY-LA": {}, "SY-QU": {}, "SY-RA": {}, "SY-RD": {}, + "SY-SU": {}, "SY-TA": {}, "SZ-HH": {}, "SZ-LU": {}, "SZ-MA": {}, + "SZ-SH": {}, "TD-BA": {}, "TD-BG": {}, "TD-BO": {}, "TD-CB": {}, + "TD-EN": {}, "TD-GR": {}, "TD-HL": {}, "TD-KA": {}, "TD-LC": {}, + "TD-LO": {}, "TD-LR": {}, "TD-MA": {}, "TD-MC": {}, "TD-ME": {}, + "TD-MO": {}, "TD-ND": {}, "TD-OD": {}, "TD-SA": {}, "TD-SI": {}, + "TD-TA": {}, "TD-TI": {}, "TD-WF": {}, "TG-C": {}, "TG-K": {}, + "TG-M": {}, "TG-P": {}, "TG-S": {}, "TH-10": {}, "TH-11": {}, + "TH-12": {}, "TH-13": {}, "TH-14": {}, "TH-15": {}, "TH-16": {}, + "TH-17": {}, "TH-18": {}, "TH-19": {}, "TH-20": {}, "TH-21": {}, + "TH-22": {}, "TH-23": {}, "TH-24": {}, "TH-25": {}, "TH-26": {}, + "TH-27": {}, "TH-30": {}, "TH-31": {}, "TH-32": {}, "TH-33": {}, + "TH-34": {}, "TH-35": {}, "TH-36": {}, "TH-37": 
{}, "TH-38": {}, "TH-39": {}, + "TH-40": {}, "TH-41": {}, "TH-42": {}, "TH-43": {}, "TH-44": {}, + "TH-45": {}, "TH-46": {}, "TH-47": {}, "TH-48": {}, "TH-49": {}, + "TH-50": {}, "TH-51": {}, "TH-52": {}, "TH-53": {}, "TH-54": {}, + "TH-55": {}, "TH-56": {}, "TH-57": {}, "TH-58": {}, "TH-60": {}, + "TH-61": {}, "TH-62": {}, "TH-63": {}, "TH-64": {}, "TH-65": {}, + "TH-66": {}, "TH-67": {}, "TH-70": {}, "TH-71": {}, "TH-72": {}, + "TH-73": {}, "TH-74": {}, "TH-75": {}, "TH-76": {}, "TH-77": {}, + "TH-80": {}, "TH-81": {}, "TH-82": {}, "TH-83": {}, "TH-84": {}, + "TH-85": {}, "TH-86": {}, "TH-90": {}, "TH-91": {}, "TH-92": {}, + "TH-93": {}, "TH-94": {}, "TH-95": {}, "TH-96": {}, "TH-S": {}, + "TJ-GB": {}, "TJ-KT": {}, "TJ-SU": {}, "TJ-DU": {}, "TJ-RA": {}, "TL-AL": {}, "TL-AN": {}, + "TL-BA": {}, "TL-BO": {}, "TL-CO": {}, "TL-DI": {}, "TL-ER": {}, + "TL-LA": {}, "TL-LI": {}, "TL-MF": {}, "TL-MT": {}, "TL-OE": {}, + "TL-VI": {}, "TM-A": {}, "TM-B": {}, "TM-D": {}, "TM-L": {}, + "TM-M": {}, "TM-S": {}, "TN-11": {}, "TN-12": {}, "TN-13": {}, + "TN-14": {}, "TN-21": {}, "TN-22": {}, "TN-23": {}, "TN-31": {}, + "TN-32": {}, "TN-33": {}, "TN-34": {}, "TN-41": {}, "TN-42": {}, + "TN-43": {}, "TN-51": {}, "TN-52": {}, "TN-53": {}, "TN-61": {}, + "TN-71": {}, "TN-72": {}, "TN-73": {}, "TN-81": {}, "TN-82": {}, + "TN-83": {}, "TO-01": {}, "TO-02": {}, "TO-03": {}, "TO-04": {}, + "TO-05": {}, "TR-01": {}, "TR-02": {}, "TR-03": {}, "TR-04": {}, + "TR-05": {}, "TR-06": {}, "TR-07": {}, "TR-08": {}, "TR-09": {}, + "TR-10": {}, "TR-11": {}, "TR-12": {}, "TR-13": {}, "TR-14": {}, + "TR-15": {}, "TR-16": {}, "TR-17": {}, "TR-18": {}, "TR-19": {}, + "TR-20": {}, "TR-21": {}, "TR-22": {}, "TR-23": {}, "TR-24": {}, + "TR-25": {}, "TR-26": {}, "TR-27": {}, "TR-28": {}, "TR-29": {}, + "TR-30": {}, "TR-31": {}, "TR-32": {}, "TR-33": {}, "TR-34": {}, + "TR-35": {}, "TR-36": {}, "TR-37": {}, "TR-38": {}, "TR-39": {}, + "TR-40": {}, "TR-41": {}, "TR-42": {}, "TR-43": {}, "TR-44": {}, + "TR-45": {}, "TR-46": {}, "TR-47": {}, "TR-48": {}, "TR-49": {}, + "TR-50": {}, "TR-51": {}, "TR-52": {}, "TR-53": {}, "TR-54": {}, + "TR-55": {}, "TR-56": {}, "TR-57": {}, "TR-58": {}, "TR-59": {}, + "TR-60": {}, "TR-61": {}, "TR-62": {}, "TR-63": {}, "TR-64": {}, + "TR-65": {}, "TR-66": {}, "TR-67": {}, "TR-68": {}, "TR-69": {}, + "TR-70": {}, "TR-71": {}, "TR-72": {}, "TR-73": {}, "TR-74": {}, + "TR-75": {}, "TR-76": {}, "TR-77": {}, "TR-78": {}, "TR-79": {}, + "TR-80": {}, "TR-81": {}, "TT-ARI": {}, "TT-CHA": {}, "TT-CTT": {}, + "TT-DMN": {}, "TT-ETO": {}, "TT-MRC": {}, "TT-TOB": {}, "TT-PED": {}, "TT-POS": {}, "TT-PRT": {}, + "TT-PTF": {}, "TT-RCM": {}, "TT-SFO": {}, "TT-SGE": {}, "TT-SIP": {}, + "TT-SJL": {}, "TT-TUP": {}, "TT-WTO": {}, "TV-FUN": {}, "TV-NIT": {}, + "TV-NKF": {}, "TV-NKL": {}, "TV-NMA": {}, "TV-NMG": {}, "TV-NUI": {}, + "TV-VAI": {}, "TW-CHA": {}, "TW-CYI": {}, "TW-CYQ": {}, "TW-KIN": {}, "TW-HSQ": {}, + "TW-HSZ": {}, "TW-HUA": {}, "TW-LIE": {}, "TW-ILA": {}, "TW-KEE": {}, "TW-KHH": {}, + "TW-KHQ": {}, "TW-MIA": {}, "TW-NAN": {}, "TW-NWT": {}, "TW-PEN": {}, "TW-PIF": {}, + "TW-TAO": {}, "TW-TNN": {}, "TW-TNQ": {}, "TW-TPE": {}, "TW-TPQ": {}, + "TW-TTT": {}, "TW-TXG": {}, "TW-TXQ": {}, "TW-YUN": {}, "TZ-01": {}, + "TZ-02": {}, "TZ-03": {}, "TZ-04": {}, "TZ-05": {}, "TZ-06": {}, + "TZ-07": {}, "TZ-08": {}, "TZ-09": {}, "TZ-10": {}, "TZ-11": {}, + "TZ-12": {}, "TZ-13": {}, "TZ-14": {}, "TZ-15": {}, "TZ-16": {}, + "TZ-17": {}, "TZ-18": {}, "TZ-19": {}, "TZ-20": {}, "TZ-21": {}, + "TZ-22": {}, "TZ-23": {}, "TZ-24": 
{}, "TZ-25": {}, "TZ-26": {}, "TZ-27": {}, "TZ-28": {}, "TZ-29": {}, "TZ-30": {}, "TZ-31": {}, + "UA-05": {}, "UA-07": {}, "UA-09": {}, "UA-12": {}, "UA-14": {}, + "UA-18": {}, "UA-21": {}, "UA-23": {}, "UA-26": {}, "UA-30": {}, + "UA-32": {}, "UA-35": {}, "UA-40": {}, "UA-43": {}, "UA-46": {}, + "UA-48": {}, "UA-51": {}, "UA-53": {}, "UA-56": {}, "UA-59": {}, + "UA-61": {}, "UA-63": {}, "UA-65": {}, "UA-68": {}, "UA-71": {}, + "UA-74": {}, "UA-77": {}, "UG-101": {}, "UG-102": {}, "UG-103": {}, + "UG-104": {}, "UG-105": {}, "UG-106": {}, "UG-107": {}, "UG-108": {}, + "UG-109": {}, "UG-110": {}, "UG-111": {}, "UG-112": {}, "UG-113": {}, + "UG-114": {}, "UG-115": {}, "UG-116": {}, "UG-201": {}, "UG-202": {}, + "UG-203": {}, "UG-204": {}, "UG-205": {}, "UG-206": {}, "UG-207": {}, + "UG-208": {}, "UG-209": {}, "UG-210": {}, "UG-211": {}, "UG-212": {}, + "UG-213": {}, "UG-214": {}, "UG-215": {}, "UG-216": {}, "UG-217": {}, + "UG-218": {}, "UG-219": {}, "UG-220": {}, "UG-221": {}, "UG-222": {}, + "UG-223": {}, "UG-224": {}, "UG-301": {}, "UG-302": {}, "UG-303": {}, + "UG-304": {}, "UG-305": {}, "UG-306": {}, "UG-307": {}, "UG-308": {}, + "UG-309": {}, "UG-310": {}, "UG-311": {}, "UG-312": {}, "UG-313": {}, + "UG-314": {}, "UG-315": {}, "UG-316": {}, "UG-317": {}, "UG-318": {}, + "UG-319": {}, "UG-320": {}, "UG-321": {}, "UG-401": {}, "UG-402": {}, + "UG-403": {}, "UG-404": {}, "UG-405": {}, "UG-406": {}, "UG-407": {}, + "UG-408": {}, "UG-409": {}, "UG-410": {}, "UG-411": {}, "UG-412": {}, + "UG-413": {}, "UG-414": {}, "UG-415": {}, "UG-416": {}, "UG-417": {}, + "UG-418": {}, "UG-419": {}, "UG-C": {}, "UG-E": {}, "UG-N": {}, + "UG-W": {}, "UG-322": {}, "UG-323": {}, "UG-420": {}, "UG-117": {}, + "UG-118": {}, "UG-225": {}, "UG-120": {}, "UG-226": {}, + "UG-121": {}, "UG-122": {}, "UG-227": {}, "UG-421": {}, + "UG-325": {}, "UG-228": {}, "UG-123": {}, "UG-422": {}, + "UG-326": {}, "UG-229": {}, "UG-124": {}, "UG-423": {}, + "UG-230": {}, "UG-327": {}, "UG-424": {}, "UG-328": {}, + "UG-425": {}, "UG-426": {}, "UG-330": {}, + "UM-67": {}, "UM-71": {}, "UM-76": {}, "UM-79": {}, + "UM-81": {}, "UM-84": {}, "UM-86": {}, "UM-89": {}, "UM-95": {}, + "US-AK": {}, "US-AL": {}, "US-AR": {}, "US-AS": {}, "US-AZ": {}, + "US-CA": {}, "US-CO": {}, "US-CT": {}, "US-DC": {}, "US-DE": {}, + "US-FL": {}, "US-GA": {}, "US-GU": {}, "US-HI": {}, "US-IA": {}, + "US-ID": {}, "US-IL": {}, "US-IN": {}, "US-KS": {}, "US-KY": {}, + "US-LA": {}, "US-MA": {}, "US-MD": {}, "US-ME": {}, "US-MI": {}, + "US-MN": {}, "US-MO": {}, "US-MP": {}, "US-MS": {}, "US-MT": {}, + "US-NC": {}, "US-ND": {}, "US-NE": {}, "US-NH": {}, "US-NJ": {}, + "US-NM": {}, "US-NV": {}, "US-NY": {}, "US-OH": {}, "US-OK": {}, + "US-OR": {}, "US-PA": {}, "US-PR": {}, "US-RI": {}, "US-SC": {}, + "US-SD": {}, "US-TN": {}, "US-TX": {}, "US-UM": {}, "US-UT": {}, + "US-VA": {}, "US-VI": {}, "US-VT": {}, "US-WA": {}, "US-WI": {}, + "US-WV": {}, "US-WY": {}, "UY-AR": {}, "UY-CA": {}, "UY-CL": {}, + "UY-CO": {}, "UY-DU": {}, "UY-FD": {}, "UY-FS": {}, "UY-LA": {}, + "UY-MA": {}, "UY-MO": {}, "UY-PA": {}, "UY-RN": {}, "UY-RO": {}, + "UY-RV": {}, "UY-SA": {}, "UY-SJ": {}, "UY-SO": {}, "UY-TA": {}, + "UY-TT": {}, "UZ-AN": {}, "UZ-BU": {}, "UZ-FA": {}, "UZ-JI": {}, + "UZ-NG": {}, "UZ-NW": {}, "UZ-QA": {}, "UZ-QR": {}, "UZ-SA": {}, + "UZ-SI": {}, "UZ-SU": {}, "UZ-TK": {}, "UZ-TO": {}, "UZ-XO": {}, + "VC-01": {}, "VC-02": {}, "VC-03": {}, "VC-04": {}, "VC-05": {}, + "VC-06": {}, "VE-A": {}, "VE-B": {}, "VE-C": {}, "VE-D": {}, + "VE-E": {}, "VE-F": {}, "VE-G": {}, "VE-H": 
{}, "VE-I": {}, + "VE-J": {}, "VE-K": {}, "VE-L": {}, "VE-M": {}, "VE-N": {}, + "VE-O": {}, "VE-P": {}, "VE-R": {}, "VE-S": {}, "VE-T": {}, + "VE-U": {}, "VE-V": {}, "VE-W": {}, "VE-X": {}, "VE-Y": {}, + "VE-Z": {}, "VN-01": {}, "VN-02": {}, "VN-03": {}, "VN-04": {}, + "VN-05": {}, "VN-06": {}, "VN-07": {}, "VN-09": {}, "VN-13": {}, + "VN-14": {}, "VN-15": {}, "VN-18": {}, "VN-20": {}, "VN-21": {}, + "VN-22": {}, "VN-23": {}, "VN-24": {}, "VN-25": {}, "VN-26": {}, + "VN-27": {}, "VN-28": {}, "VN-29": {}, "VN-30": {}, "VN-31": {}, + "VN-32": {}, "VN-33": {}, "VN-34": {}, "VN-35": {}, "VN-36": {}, + "VN-37": {}, "VN-39": {}, "VN-40": {}, "VN-41": {}, "VN-43": {}, + "VN-44": {}, "VN-45": {}, "VN-46": {}, "VN-47": {}, "VN-49": {}, + "VN-50": {}, "VN-51": {}, "VN-52": {}, "VN-53": {}, "VN-54": {}, + "VN-55": {}, "VN-56": {}, "VN-57": {}, "VN-58": {}, "VN-59": {}, + "VN-61": {}, "VN-63": {}, "VN-66": {}, "VN-67": {}, "VN-68": {}, + "VN-69": {}, "VN-70": {}, "VN-71": {}, "VN-72": {}, "VN-73": {}, + "VN-CT": {}, "VN-DN": {}, "VN-HN": {}, "VN-HP": {}, "VN-SG": {}, + "VU-MAP": {}, "VU-PAM": {}, "VU-SAM": {}, "VU-SEE": {}, "VU-TAE": {}, + "VU-TOB": {}, "WF-SG": {}, "WF-UV": {}, "WS-AA": {}, "WS-AL": {}, "WS-AT": {}, "WS-FA": {}, + "WS-GE": {}, "WS-GI": {}, "WS-PA": {}, "WS-SA": {}, "WS-TU": {}, + "WS-VF": {}, "WS-VS": {}, "YE-AB": {}, "YE-AD": {}, "YE-AM": {}, + "YE-BA": {}, "YE-DA": {}, "YE-DH": {}, "YE-HD": {}, "YE-HJ": {}, "YE-HU": {}, + "YE-IB": {}, "YE-JA": {}, "YE-LA": {}, "YE-MA": {}, "YE-MR": {}, + "YE-MU": {}, "YE-MW": {}, "YE-RA": {}, "YE-SA": {}, "YE-SD": {}, "YE-SH": {}, + "YE-SN": {}, "YE-TA": {}, "ZA-EC": {}, "ZA-FS": {}, "ZA-GP": {}, + "ZA-LP": {}, "ZA-MP": {}, "ZA-NC": {}, "ZA-NW": {}, "ZA-WC": {}, + "ZA-ZN": {}, "ZA-KZN": {}, "ZM-01": {}, "ZM-02": {}, "ZM-03": {}, "ZM-04": {}, + "ZM-05": {}, "ZM-06": {}, "ZM-07": {}, "ZM-08": {}, "ZM-09": {}, "ZM-10": {}, + "ZW-BU": {}, "ZW-HA": {}, "ZW-MA": {}, "ZW-MC": {}, "ZW-ME": {}, + "ZW-MI": {}, "ZW-MN": {}, "ZW-MS": {}, "ZW-MV": {}, "ZW-MW": {}, } diff --git a/vendor/github.com/go-playground/validator/v10/currency_codes.go b/vendor/github.com/go-playground/validator/v10/currency_codes.go index a5cd9b18a0a..d0317f89ccb 100644 --- a/vendor/github.com/go-playground/validator/v10/currency_codes.go +++ b/vendor/github.com/go-playground/validator/v10/currency_codes.go @@ -1,79 +1,79 @@ package validator -var iso4217 = map[string]bool{ - "AFN": true, "EUR": true, "ALL": true, "DZD": true, "USD": true, - "AOA": true, "XCD": true, "ARS": true, "AMD": true, "AWG": true, - "AUD": true, "AZN": true, "BSD": true, "BHD": true, "BDT": true, - "BBD": true, "BYN": true, "BZD": true, "XOF": true, "BMD": true, - "INR": true, "BTN": true, "BOB": true, "BOV": true, "BAM": true, - "BWP": true, "NOK": true, "BRL": true, "BND": true, "BGN": true, - "BIF": true, "CVE": true, "KHR": true, "XAF": true, "CAD": true, - "KYD": true, "CLP": true, "CLF": true, "CNY": true, "COP": true, - "COU": true, "KMF": true, "CDF": true, "NZD": true, "CRC": true, - "HRK": true, "CUP": true, "CUC": true, "ANG": true, "CZK": true, - "DKK": true, "DJF": true, "DOP": true, "EGP": true, "SVC": true, - "ERN": true, "SZL": true, "ETB": true, "FKP": true, "FJD": true, - "XPF": true, "GMD": true, "GEL": true, "GHS": true, "GIP": true, - "GTQ": true, "GBP": true, "GNF": true, "GYD": true, "HTG": true, - "HNL": true, "HKD": true, "HUF": true, "ISK": true, "IDR": true, - "XDR": true, "IRR": true, "IQD": true, "ILS": true, "JMD": true, - "JPY": true, "JOD": true, "KZT": true, "KES": true, "KPW": 
true, - "KRW": true, "KWD": true, "KGS": true, "LAK": true, "LBP": true, - "LSL": true, "ZAR": true, "LRD": true, "LYD": true, "CHF": true, - "MOP": true, "MKD": true, "MGA": true, "MWK": true, "MYR": true, - "MVR": true, "MRU": true, "MUR": true, "XUA": true, "MXN": true, - "MXV": true, "MDL": true, "MNT": true, "MAD": true, "MZN": true, - "MMK": true, "NAD": true, "NPR": true, "NIO": true, "NGN": true, - "OMR": true, "PKR": true, "PAB": true, "PGK": true, "PYG": true, - "PEN": true, "PHP": true, "PLN": true, "QAR": true, "RON": true, - "RUB": true, "RWF": true, "SHP": true, "WST": true, "STN": true, - "SAR": true, "RSD": true, "SCR": true, "SLL": true, "SGD": true, - "XSU": true, "SBD": true, "SOS": true, "SSP": true, "LKR": true, - "SDG": true, "SRD": true, "SEK": true, "CHE": true, "CHW": true, - "SYP": true, "TWD": true, "TJS": true, "TZS": true, "THB": true, - "TOP": true, "TTD": true, "TND": true, "TRY": true, "TMT": true, - "UGX": true, "UAH": true, "AED": true, "USN": true, "UYU": true, - "UYI": true, "UYW": true, "UZS": true, "VUV": true, "VES": true, - "VND": true, "YER": true, "ZMW": true, "ZWL": true, "XBA": true, - "XBB": true, "XBC": true, "XBD": true, "XTS": true, "XXX": true, - "XAU": true, "XPD": true, "XPT": true, "XAG": true, +var iso4217 = map[string]struct{}{ + "AFN": {}, "EUR": {}, "ALL": {}, "DZD": {}, "USD": {}, + "AOA": {}, "XCD": {}, "ARS": {}, "AMD": {}, "AWG": {}, + "AUD": {}, "AZN": {}, "BSD": {}, "BHD": {}, "BDT": {}, + "BBD": {}, "BYN": {}, "BZD": {}, "XOF": {}, "BMD": {}, + "INR": {}, "BTN": {}, "BOB": {}, "BOV": {}, "BAM": {}, + "BWP": {}, "NOK": {}, "BRL": {}, "BND": {}, "BGN": {}, + "BIF": {}, "CVE": {}, "KHR": {}, "XAF": {}, "CAD": {}, + "KYD": {}, "CLP": {}, "CLF": {}, "CNY": {}, "COP": {}, + "COU": {}, "KMF": {}, "CDF": {}, "NZD": {}, "CRC": {}, + "HRK": {}, "CUP": {}, "CUC": {}, "ANG": {}, "CZK": {}, + "DKK": {}, "DJF": {}, "DOP": {}, "EGP": {}, "SVC": {}, + "ERN": {}, "SZL": {}, "ETB": {}, "FKP": {}, "FJD": {}, + "XPF": {}, "GMD": {}, "GEL": {}, "GHS": {}, "GIP": {}, + "GTQ": {}, "GBP": {}, "GNF": {}, "GYD": {}, "HTG": {}, + "HNL": {}, "HKD": {}, "HUF": {}, "ISK": {}, "IDR": {}, + "XDR": {}, "IRR": {}, "IQD": {}, "ILS": {}, "JMD": {}, + "JPY": {}, "JOD": {}, "KZT": {}, "KES": {}, "KPW": {}, + "KRW": {}, "KWD": {}, "KGS": {}, "LAK": {}, "LBP": {}, + "LSL": {}, "ZAR": {}, "LRD": {}, "LYD": {}, "CHF": {}, + "MOP": {}, "MKD": {}, "MGA": {}, "MWK": {}, "MYR": {}, + "MVR": {}, "MRU": {}, "MUR": {}, "XUA": {}, "MXN": {}, + "MXV": {}, "MDL": {}, "MNT": {}, "MAD": {}, "MZN": {}, + "MMK": {}, "NAD": {}, "NPR": {}, "NIO": {}, "NGN": {}, + "OMR": {}, "PKR": {}, "PAB": {}, "PGK": {}, "PYG": {}, + "PEN": {}, "PHP": {}, "PLN": {}, "QAR": {}, "RON": {}, + "RUB": {}, "RWF": {}, "SHP": {}, "WST": {}, "STN": {}, + "SAR": {}, "RSD": {}, "SCR": {}, "SLL": {}, "SGD": {}, + "XSU": {}, "SBD": {}, "SOS": {}, "SSP": {}, "LKR": {}, + "SDG": {}, "SRD": {}, "SEK": {}, "CHE": {}, "CHW": {}, + "SYP": {}, "TWD": {}, "TJS": {}, "TZS": {}, "THB": {}, + "TOP": {}, "TTD": {}, "TND": {}, "TRY": {}, "TMT": {}, + "UGX": {}, "UAH": {}, "AED": {}, "USN": {}, "UYU": {}, + "UYI": {}, "UYW": {}, "UZS": {}, "VUV": {}, "VES": {}, + "VND": {}, "YER": {}, "ZMW": {}, "ZWL": {}, "XBA": {}, + "XBB": {}, "XBC": {}, "XBD": {}, "XTS": {}, "XXX": {}, + "XAU": {}, "XPD": {}, "XPT": {}, "XAG": {}, } -var iso4217_numeric = map[int]bool{ - 8: true, 12: true, 32: true, 36: true, 44: true, - 48: true, 50: true, 51: true, 52: true, 60: true, - 64: true, 68: true, 72: true, 84: true, 90: true, - 96: true, 104: 
true, 108: true, 116: true, 124: true, - 132: true, 136: true, 144: true, 152: true, 156: true, - 170: true, 174: true, 188: true, 191: true, 192: true, - 203: true, 208: true, 214: true, 222: true, 230: true, - 232: true, 238: true, 242: true, 262: true, 270: true, - 292: true, 320: true, 324: true, 328: true, 332: true, - 340: true, 344: true, 348: true, 352: true, 356: true, - 360: true, 364: true, 368: true, 376: true, 388: true, - 392: true, 398: true, 400: true, 404: true, 408: true, - 410: true, 414: true, 417: true, 418: true, 422: true, - 426: true, 430: true, 434: true, 446: true, 454: true, - 458: true, 462: true, 480: true, 484: true, 496: true, - 498: true, 504: true, 512: true, 516: true, 524: true, - 532: true, 533: true, 548: true, 554: true, 558: true, - 566: true, 578: true, 586: true, 590: true, 598: true, - 600: true, 604: true, 608: true, 634: true, 643: true, - 646: true, 654: true, 682: true, 690: true, 694: true, - 702: true, 704: true, 706: true, 710: true, 728: true, - 748: true, 752: true, 756: true, 760: true, 764: true, - 776: true, 780: true, 784: true, 788: true, 800: true, - 807: true, 818: true, 826: true, 834: true, 840: true, - 858: true, 860: true, 882: true, 886: true, 901: true, - 927: true, 928: true, 929: true, 930: true, 931: true, - 932: true, 933: true, 934: true, 936: true, 938: true, - 940: true, 941: true, 943: true, 944: true, 946: true, - 947: true, 948: true, 949: true, 950: true, 951: true, - 952: true, 953: true, 955: true, 956: true, 957: true, - 958: true, 959: true, 960: true, 961: true, 962: true, - 963: true, 964: true, 965: true, 967: true, 968: true, - 969: true, 970: true, 971: true, 972: true, 973: true, - 975: true, 976: true, 977: true, 978: true, 979: true, - 980: true, 981: true, 984: true, 985: true, 986: true, - 990: true, 994: true, 997: true, 999: true, +var iso4217_numeric = map[int]struct{}{ + 8: {}, 12: {}, 32: {}, 36: {}, 44: {}, + 48: {}, 50: {}, 51: {}, 52: {}, 60: {}, + 64: {}, 68: {}, 72: {}, 84: {}, 90: {}, + 96: {}, 104: {}, 108: {}, 116: {}, 124: {}, + 132: {}, 136: {}, 144: {}, 152: {}, 156: {}, + 170: {}, 174: {}, 188: {}, 191: {}, 192: {}, + 203: {}, 208: {}, 214: {}, 222: {}, 230: {}, + 232: {}, 238: {}, 242: {}, 262: {}, 270: {}, + 292: {}, 320: {}, 324: {}, 328: {}, 332: {}, + 340: {}, 344: {}, 348: {}, 352: {}, 356: {}, + 360: {}, 364: {}, 368: {}, 376: {}, 388: {}, + 392: {}, 398: {}, 400: {}, 404: {}, 408: {}, + 410: {}, 414: {}, 417: {}, 418: {}, 422: {}, + 426: {}, 430: {}, 434: {}, 446: {}, 454: {}, + 458: {}, 462: {}, 480: {}, 484: {}, 496: {}, + 498: {}, 504: {}, 512: {}, 516: {}, 524: {}, + 532: {}, 533: {}, 548: {}, 554: {}, 558: {}, + 566: {}, 578: {}, 586: {}, 590: {}, 598: {}, + 600: {}, 604: {}, 608: {}, 634: {}, 643: {}, + 646: {}, 654: {}, 682: {}, 690: {}, 694: {}, + 702: {}, 704: {}, 706: {}, 710: {}, 728: {}, + 748: {}, 752: {}, 756: {}, 760: {}, 764: {}, + 776: {}, 780: {}, 784: {}, 788: {}, 800: {}, + 807: {}, 818: {}, 826: {}, 834: {}, 840: {}, + 858: {}, 860: {}, 882: {}, 886: {}, 901: {}, + 927: {}, 928: {}, 929: {}, 930: {}, 931: {}, + 932: {}, 933: {}, 934: {}, 936: {}, 938: {}, + 940: {}, 941: {}, 943: {}, 944: {}, 946: {}, + 947: {}, 948: {}, 949: {}, 950: {}, 951: {}, + 952: {}, 953: {}, 955: {}, 956: {}, 957: {}, + 958: {}, 959: {}, 960: {}, 961: {}, 962: {}, + 963: {}, 964: {}, 965: {}, 967: {}, 968: {}, + 969: {}, 970: {}, 971: {}, 972: {}, 973: {}, + 975: {}, 976: {}, 977: {}, 978: {}, 979: {}, + 980: {}, 981: {}, 984: {}, 985: {}, 986: {}, + 990: {}, 994: {}, 997: {}, 999: 
{}, } diff --git a/vendor/github.com/go-playground/validator/v10/doc.go b/vendor/github.com/go-playground/validator/v10/doc.go index b4740918813..90a8ade653c 100644 --- a/vendor/github.com/go-playground/validator/v10/doc.go +++ b/vendor/github.com/go-playground/validator/v10/doc.go @@ -253,7 +253,7 @@ Example #2 This validates that the value is not the data types default zero value. For numbers ensures value is not zero. For strings ensures value is -not "". For slices, maps, pointers, interfaces, channels and functions +not "". For booleans ensures value is not false. For slices, maps, pointers, interfaces, channels and functions ensures the value is not nil. For structs ensures value is not the zero value when using WithRequiredStructEnabled. Usage: required @@ -911,11 +911,20 @@ This will accept any uri the golang request uri accepts # Urn RFC 2141 String -This validataes that a string value contains a valid URN +This validates that a string value contains a valid URN according to the RFC 2141 spec. Usage: urn_rfc2141 +# Base32 String + +This validates that a string value contains a valid bas324 value. +Although an empty string is valid base32 this will report an empty string +as an error, if you wish to accept an empty string as valid you can use +this with the omitempty tag. + + Usage: base32 + # Base64 String This validates that a string value contains a valid base64 value. @@ -957,7 +966,7 @@ Bitcoin Bech32 Address (segwit) This validates that a string value contains a valid bitcoin Bech32 address as defined by bip-0173 (https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki) -Special thanks to Pieter Wuille for providng reference implementations. +Special thanks to Pieter Wuille for providing reference implementations. Usage: btc_addr_bech32 @@ -1290,7 +1299,7 @@ may not exist at the time of validation. # HostPort This validates that a string value contains a valid DNS hostname and port that -can be used to valiate fields typically passed to sockets and connections. +can be used to validate fields typically passed to sockets and connections. Usage: hostname_port @@ -1377,11 +1386,19 @@ This validates that a string value contains a valid credit card number using Luh This validates that a string or (u)int value contains a valid checksum using the Luhn algorithm. -# MongoDb ObjectID +# MongoDB -This validates that a string is a valid 24 character hexadecimal string. +This validates that a string is a valid 24 character hexadecimal string or valid connection string. 
Usage: mongodb + mongodb_connection_string + +Example: + + type Test struct { + ObjectIdField string `validate:"mongodb"` + ConnectionStringField string `validate:"mongodb_connection_string"` + } # Cron diff --git a/vendor/github.com/go-playground/validator/v10/postcode_regexes.go b/vendor/github.com/go-playground/validator/v10/postcode_regexes.go index e7e7b687f4b..326b8f7538f 100644 --- a/vendor/github.com/go-playground/validator/v10/postcode_regexes.go +++ b/vendor/github.com/go-playground/validator/v10/postcode_regexes.go @@ -1,6 +1,9 @@ package validator -import "regexp" +import ( + "regexp" + "sync" +) var postCodePatternDict = map[string]string{ "GB": `^GIR[ ]?0AA|((AB|AL|B|BA|BB|BD|BH|BL|BN|BR|BS|BT|CA|CB|CF|CH|CM|CO|CR|CT|CV|CW|DA|DD|DE|DG|DH|DL|DN|DT|DY|E|EC|EH|EN|EX|FK|FY|G|GL|GY|GU|HA|HD|HG|HP|HR|HS|HU|HX|IG|IM|IP|IV|JE|KA|KT|KW|KY|L|LA|LD|LE|LL|LN|LS|LU|M|ME|MK|ML|N|NE|NG|NN|NP|NR|NW|OL|OX|PA|PE|PH|PL|PO|PR|RG|RH|RM|S|SA|SE|SG|SK|SL|SM|SN|SO|SP|SR|SS|ST|SW|SY|TA|TD|TF|TN|TQ|TR|TS|TW|UB|W|WA|WC|WD|WF|WN|WR|WS|WV|YO|ZE)(\d[\dA-Z]?[ ]?\d[ABD-HJLN-UW-Z]{2}))|BFPO[ ]?\d{1,4}$`, @@ -164,9 +167,12 @@ var postCodePatternDict = map[string]string{ "YT": `^976\d{2}$`, } -var postCodeRegexDict = map[string]*regexp.Regexp{} +var ( + postcodeRegexInit sync.Once + postCodeRegexDict = map[string]*regexp.Regexp{} +) -func init() { +func initPostcodes() { for countryCode, pattern := range postCodePatternDict { postCodeRegexDict[countryCode] = regexp.MustCompile(pattern) } diff --git a/vendor/github.com/go-playground/validator/v10/regexes.go b/vendor/github.com/go-playground/validator/v10/regexes.go index af98d8daa62..7e1dd5a03e5 100644 --- a/vendor/github.com/go-playground/validator/v10/regexes.go +++ b/vendor/github.com/go-playground/validator/v10/regexes.go @@ -1,6 +1,9 @@ package validator -import "regexp" +import ( + "regexp" + "sync" +) const ( alphaRegexString = "^[a-zA-Z]+$" @@ -17,6 +20,7 @@ const ( hslaRegexString = "^hsla\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$" emailRegexString = "^(?:(?:(?:(?:[a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(?:\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|(?:(?:\\x22)(?:(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(?:\\x20|\\x09)+)?(?:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(\\x20|\\x09)+)?(?:\\x22))))@(?:(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$" e164RegexString = "^\\+[1-9]?[0-9]{7,14}$" + base32RegexString = 
"^(?:[A-Z2-7]{8})*(?:[A-Z2-7]{2}={6}|[A-Z2-7]{4}={4}|[A-Z2-7]{5}={3}|[A-Z2-7]{7}=|[A-Z2-7]{8})$" base64RegexString = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$" base64URLRegexString = "^(?:[A-Za-z0-9-_]{4})*(?:[A-Za-z0-9-_]{2}==|[A-Za-z0-9-_]{3}=|[A-Za-z0-9-_]{4})$" base64RawURLRegexString = "^(?:[A-Za-z0-9-_]{4})*(?:[A-Za-z0-9-_]{2,4})$" @@ -31,7 +35,7 @@ const ( uUID4RFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-4[0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$" uUID5RFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-5[0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$" uUIDRFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$" - uLIDRegexString = "^[A-HJKMNP-TV-Z0-9]{26}$" + uLIDRegexString = "^(?i)[A-HJKMNP-TV-Z0-9]{26}$" md4RegexString = "^[0-9a-f]{32}$" md5RegexString = "^[0-9a-f]{32}$" sha256RegexString = "^[0-9a-f]{64}$" @@ -67,79 +71,93 @@ const ( semverRegexString = `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$` // numbered capture groups https://semver.org/ dnsRegexStringRFC1035Label = "^[a-z]([-a-z0-9]*[a-z0-9]){0,62}$" cveRegexString = `^CVE-(1999|2\d{3})-(0[^0]\d{2}|0\d[^0]\d{1}|0\d{2}[^0]|[1-9]{1}\d{3,})$` // CVE Format Id https://cve.mitre.org/cve/identifiers/syntaxchange.html - mongodbRegexString = "^[a-f\\d]{24}$" + mongodbIdRegexString = "^[a-f\\d]{24}$" + mongodbConnStringRegexString = "^mongodb(\\+srv)?:\\/\\/(([a-zA-Z\\d]+):([a-zA-Z\\d$:\\/?#\\[\\]@]+)@)?(([a-z\\d.-]+)(:[\\d]+)?)((,(([a-z\\d.-]+)(:(\\d+))?))*)?(\\/[a-zA-Z-_]{1,64})?(\\?(([a-zA-Z]+)=([a-zA-Z\\d]+))(&(([a-zA-Z\\d]+)=([a-zA-Z\\d]+))?)*)?$" cronRegexString = `(@(annually|yearly|monthly|weekly|daily|hourly|reboot))|(@every (\d+(ns|us|µs|ms|s|m|h))+)|((((\d+,)+\d+|(\d+(\/|-)\d+)|\d+|\*) ?){5,7})` spicedbIDRegexString = `^(([a-zA-Z0-9/_|\-=+]{1,})|\*)$` spicedbPermissionRegexString = "^([a-z][a-z0-9_]{1,62}[a-z0-9])?$" spicedbTypeRegexString = "^([a-z][a-z0-9_]{1,61}[a-z0-9]/)?[a-z][a-z0-9_]{1,62}[a-z0-9]$" ) +func lazyRegexCompile(str string) func() *regexp.Regexp { + var regex *regexp.Regexp + var once sync.Once + return func() *regexp.Regexp { + once.Do(func() { + regex = regexp.MustCompile(str) + }) + return regex + } +} + var ( - alphaRegex = regexp.MustCompile(alphaRegexString) - alphaNumericRegex = regexp.MustCompile(alphaNumericRegexString) - alphaUnicodeRegex = regexp.MustCompile(alphaUnicodeRegexString) - alphaUnicodeNumericRegex = regexp.MustCompile(alphaUnicodeNumericRegexString) - numericRegex = regexp.MustCompile(numericRegexString) - numberRegex = regexp.MustCompile(numberRegexString) - hexadecimalRegex = regexp.MustCompile(hexadecimalRegexString) - hexColorRegex = regexp.MustCompile(hexColorRegexString) - rgbRegex = regexp.MustCompile(rgbRegexString) - rgbaRegex = regexp.MustCompile(rgbaRegexString) - hslRegex = regexp.MustCompile(hslRegexString) - hslaRegex = regexp.MustCompile(hslaRegexString) - e164Regex = regexp.MustCompile(e164RegexString) - emailRegex = regexp.MustCompile(emailRegexString) - base64Regex = regexp.MustCompile(base64RegexString) - base64URLRegex = regexp.MustCompile(base64URLRegexString) - base64RawURLRegex = regexp.MustCompile(base64RawURLRegexString) - iSBN10Regex = regexp.MustCompile(iSBN10RegexString) - iSBN13Regex = regexp.MustCompile(iSBN13RegexString) - iSSNRegex = regexp.MustCompile(iSSNRegexString) - uUID3Regex = 
regexp.MustCompile(uUID3RegexString) - uUID4Regex = regexp.MustCompile(uUID4RegexString) - uUID5Regex = regexp.MustCompile(uUID5RegexString) - uUIDRegex = regexp.MustCompile(uUIDRegexString) - uUID3RFC4122Regex = regexp.MustCompile(uUID3RFC4122RegexString) - uUID4RFC4122Regex = regexp.MustCompile(uUID4RFC4122RegexString) - uUID5RFC4122Regex = regexp.MustCompile(uUID5RFC4122RegexString) - uUIDRFC4122Regex = regexp.MustCompile(uUIDRFC4122RegexString) - uLIDRegex = regexp.MustCompile(uLIDRegexString) - md4Regex = regexp.MustCompile(md4RegexString) - md5Regex = regexp.MustCompile(md5RegexString) - sha256Regex = regexp.MustCompile(sha256RegexString) - sha384Regex = regexp.MustCompile(sha384RegexString) - sha512Regex = regexp.MustCompile(sha512RegexString) - ripemd128Regex = regexp.MustCompile(ripemd128RegexString) - ripemd160Regex = regexp.MustCompile(ripemd160RegexString) - tiger128Regex = regexp.MustCompile(tiger128RegexString) - tiger160Regex = regexp.MustCompile(tiger160RegexString) - tiger192Regex = regexp.MustCompile(tiger192RegexString) - aSCIIRegex = regexp.MustCompile(aSCIIRegexString) - printableASCIIRegex = regexp.MustCompile(printableASCIIRegexString) - multibyteRegex = regexp.MustCompile(multibyteRegexString) - dataURIRegex = regexp.MustCompile(dataURIRegexString) - latitudeRegex = regexp.MustCompile(latitudeRegexString) - longitudeRegex = regexp.MustCompile(longitudeRegexString) - sSNRegex = regexp.MustCompile(sSNRegexString) - hostnameRegexRFC952 = regexp.MustCompile(hostnameRegexStringRFC952) - hostnameRegexRFC1123 = regexp.MustCompile(hostnameRegexStringRFC1123) - fqdnRegexRFC1123 = regexp.MustCompile(fqdnRegexStringRFC1123) - btcAddressRegex = regexp.MustCompile(btcAddressRegexString) - btcUpperAddressRegexBech32 = regexp.MustCompile(btcAddressUpperRegexStringBech32) - btcLowerAddressRegexBech32 = regexp.MustCompile(btcAddressLowerRegexStringBech32) - ethAddressRegex = regexp.MustCompile(ethAddressRegexString) - uRLEncodedRegex = regexp.MustCompile(uRLEncodedRegexString) - hTMLEncodedRegex = regexp.MustCompile(hTMLEncodedRegexString) - hTMLRegex = regexp.MustCompile(hTMLRegexString) - jWTRegex = regexp.MustCompile(jWTRegexString) - splitParamsRegex = regexp.MustCompile(splitParamsRegexString) - bicRegex = regexp.MustCompile(bicRegexString) - semverRegex = regexp.MustCompile(semverRegexString) - dnsRegexRFC1035Label = regexp.MustCompile(dnsRegexStringRFC1035Label) - cveRegex = regexp.MustCompile(cveRegexString) - mongodbRegex = regexp.MustCompile(mongodbRegexString) - cronRegex = regexp.MustCompile(cronRegexString) - spicedbIDRegex = regexp.MustCompile(spicedbIDRegexString) - spicedbPermissionRegex = regexp.MustCompile(spicedbPermissionRegexString) - spicedbTypeRegex = regexp.MustCompile(spicedbTypeRegexString) + alphaRegex = lazyRegexCompile(alphaRegexString) + alphaNumericRegex = lazyRegexCompile(alphaNumericRegexString) + alphaUnicodeRegex = lazyRegexCompile(alphaUnicodeRegexString) + alphaUnicodeNumericRegex = lazyRegexCompile(alphaUnicodeNumericRegexString) + numericRegex = lazyRegexCompile(numericRegexString) + numberRegex = lazyRegexCompile(numberRegexString) + hexadecimalRegex = lazyRegexCompile(hexadecimalRegexString) + hexColorRegex = lazyRegexCompile(hexColorRegexString) + rgbRegex = lazyRegexCompile(rgbRegexString) + rgbaRegex = lazyRegexCompile(rgbaRegexString) + hslRegex = lazyRegexCompile(hslRegexString) + hslaRegex = lazyRegexCompile(hslaRegexString) + e164Regex = lazyRegexCompile(e164RegexString) + emailRegex = lazyRegexCompile(emailRegexString) + base32Regex = 
lazyRegexCompile(base32RegexString) + base64Regex = lazyRegexCompile(base64RegexString) + base64URLRegex = lazyRegexCompile(base64URLRegexString) + base64RawURLRegex = lazyRegexCompile(base64RawURLRegexString) + iSBN10Regex = lazyRegexCompile(iSBN10RegexString) + iSBN13Regex = lazyRegexCompile(iSBN13RegexString) + iSSNRegex = lazyRegexCompile(iSSNRegexString) + uUID3Regex = lazyRegexCompile(uUID3RegexString) + uUID4Regex = lazyRegexCompile(uUID4RegexString) + uUID5Regex = lazyRegexCompile(uUID5RegexString) + uUIDRegex = lazyRegexCompile(uUIDRegexString) + uUID3RFC4122Regex = lazyRegexCompile(uUID3RFC4122RegexString) + uUID4RFC4122Regex = lazyRegexCompile(uUID4RFC4122RegexString) + uUID5RFC4122Regex = lazyRegexCompile(uUID5RFC4122RegexString) + uUIDRFC4122Regex = lazyRegexCompile(uUIDRFC4122RegexString) + uLIDRegex = lazyRegexCompile(uLIDRegexString) + md4Regex = lazyRegexCompile(md4RegexString) + md5Regex = lazyRegexCompile(md5RegexString) + sha256Regex = lazyRegexCompile(sha256RegexString) + sha384Regex = lazyRegexCompile(sha384RegexString) + sha512Regex = lazyRegexCompile(sha512RegexString) + ripemd128Regex = lazyRegexCompile(ripemd128RegexString) + ripemd160Regex = lazyRegexCompile(ripemd160RegexString) + tiger128Regex = lazyRegexCompile(tiger128RegexString) + tiger160Regex = lazyRegexCompile(tiger160RegexString) + tiger192Regex = lazyRegexCompile(tiger192RegexString) + aSCIIRegex = lazyRegexCompile(aSCIIRegexString) + printableASCIIRegex = lazyRegexCompile(printableASCIIRegexString) + multibyteRegex = lazyRegexCompile(multibyteRegexString) + dataURIRegex = lazyRegexCompile(dataURIRegexString) + latitudeRegex = lazyRegexCompile(latitudeRegexString) + longitudeRegex = lazyRegexCompile(longitudeRegexString) + sSNRegex = lazyRegexCompile(sSNRegexString) + hostnameRegexRFC952 = lazyRegexCompile(hostnameRegexStringRFC952) + hostnameRegexRFC1123 = lazyRegexCompile(hostnameRegexStringRFC1123) + fqdnRegexRFC1123 = lazyRegexCompile(fqdnRegexStringRFC1123) + btcAddressRegex = lazyRegexCompile(btcAddressRegexString) + btcUpperAddressRegexBech32 = lazyRegexCompile(btcAddressUpperRegexStringBech32) + btcLowerAddressRegexBech32 = lazyRegexCompile(btcAddressLowerRegexStringBech32) + ethAddressRegex = lazyRegexCompile(ethAddressRegexString) + uRLEncodedRegex = lazyRegexCompile(uRLEncodedRegexString) + hTMLEncodedRegex = lazyRegexCompile(hTMLEncodedRegexString) + hTMLRegex = lazyRegexCompile(hTMLRegexString) + jWTRegex = lazyRegexCompile(jWTRegexString) + splitParamsRegex = lazyRegexCompile(splitParamsRegexString) + bicRegex = lazyRegexCompile(bicRegexString) + semverRegex = lazyRegexCompile(semverRegexString) + dnsRegexRFC1035Label = lazyRegexCompile(dnsRegexStringRFC1035Label) + cveRegex = lazyRegexCompile(cveRegexString) + mongodbIdRegex = lazyRegexCompile(mongodbIdRegexString) + mongodbConnectionRegex = lazyRegexCompile(mongodbConnStringRegexString) + cronRegex = lazyRegexCompile(cronRegexString) + spicedbIDRegex = lazyRegexCompile(spicedbIDRegexString) + spicedbPermissionRegex = lazyRegexCompile(spicedbPermissionRegexString) + spicedbTypeRegex = lazyRegexCompile(spicedbTypeRegexString) ) diff --git a/vendor/github.com/go-playground/validator/v10/util.go b/vendor/github.com/go-playground/validator/v10/util.go index 16851593da5..9285223a2fe 100644 --- a/vendor/github.com/go-playground/validator/v10/util.go +++ b/vendor/github.com/go-playground/validator/v10/util.go @@ -271,7 +271,7 @@ func asFloat64(param string) float64 { return i } -// asFloat64 returns the parameter as a float64 +// asFloat32 
returns the parameter as a float32 // or panics if it can't convert func asFloat32(param string) float64 { i, err := strconv.ParseFloat(param, 32) @@ -297,7 +297,8 @@ func panicIf(err error) { // Checks if field value matches regex. If fl.Field can be cast to Stringer, it uses the Stringer interfaces // String() return value. Otherwise, it uses fl.Field's String() value. -func fieldMatchesRegexByStringerValOrString(regex *regexp.Regexp, fl FieldLevel) bool { +func fieldMatchesRegexByStringerValOrString(regexFn func() *regexp.Regexp, fl FieldLevel) bool { + regex := regexFn() switch fl.Field().Kind() { case reflect.String: return regex.MatchString(fl.Field().String()) diff --git a/vendor/github.com/go-playground/validator/v10/validator_instance.go b/vendor/github.com/go-playground/validator/v10/validator_instance.go index 1a345138ed6..d9f148dbaec 100644 --- a/vendor/github.com/go-playground/validator/v10/validator_instance.go +++ b/vendor/github.com/go-playground/validator/v10/validator_instance.go @@ -74,8 +74,8 @@ type CustomTypeFunc func(field reflect.Value) interface{} type TagNameFunc func(field reflect.StructField) string type internalValidationFuncWrapper struct { - fn FuncCtx - runValidatinOnNil bool + fn FuncCtx + runValidationOnNil bool } // Validate contains the validator settings and cache @@ -245,7 +245,7 @@ func (v *Validate) registerValidation(tag string, fn FuncCtx, bakedIn bool, nilC if !bakedIn && (ok || strings.ContainsAny(tag, restrictedTagChars)) { panic(fmt.Sprintf(restrictedTagErr, tag)) } - v.validations[tag] = internalValidationFuncWrapper{fn: fn, runValidatinOnNil: nilCheckable} + v.validations[tag] = internalValidationFuncWrapper{fn: fn, runValidationOnNil: nilCheckable} return nil } @@ -676,7 +676,7 @@ func (v *Validate) VarWithValue(field interface{}, other interface{}, tag string } // VarWithValueCtx validates a single variable, against another variable/field's value using tag style validation and -// allows passing of contextual validation validation information via context.Context. +// allows passing of contextual validation information via context.Context. // eg. 
// s1 := "abcd" // s2 := "abcd" diff --git a/vendor/github.com/go-task/slim-sprig/.editorconfig b/vendor/github.com/go-task/slim-sprig/v3/.editorconfig similarity index 100% rename from vendor/github.com/go-task/slim-sprig/.editorconfig rename to vendor/github.com/go-task/slim-sprig/v3/.editorconfig diff --git a/vendor/github.com/go-task/slim-sprig/.gitattributes b/vendor/github.com/go-task/slim-sprig/v3/.gitattributes similarity index 100% rename from vendor/github.com/go-task/slim-sprig/.gitattributes rename to vendor/github.com/go-task/slim-sprig/v3/.gitattributes diff --git a/vendor/github.com/go-task/slim-sprig/.gitignore b/vendor/github.com/go-task/slim-sprig/v3/.gitignore similarity index 100% rename from vendor/github.com/go-task/slim-sprig/.gitignore rename to vendor/github.com/go-task/slim-sprig/v3/.gitignore diff --git a/vendor/github.com/go-task/slim-sprig/CHANGELOG.md b/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md similarity index 95% rename from vendor/github.com/go-task/slim-sprig/CHANGELOG.md rename to vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md index 61d8ebffc37..2ce45dd4eca 100644 --- a/vendor/github.com/go-task/slim-sprig/CHANGELOG.md +++ b/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md @@ -1,5 +1,24 @@ # Changelog +## Release 3.2.3 (2022-11-29) + +### Changed + +- Updated docs (thanks @book987 @aJetHorn @neelayu @pellizzetti @apricote @SaigyoujiYuyuko233 @AlekSi) +- #348: Updated huandu/xstrings which fixed a snake case bug (thanks @yxxhero) +- #353: Updated masterminds/semver which included bug fixes +- #354: Updated golang.org/x/crypto which included bug fixes + +## Release 3.2.2 (2021-02-04) + +This is a re-release of 3.2.1 to satisfy something with the Go module system. + +## Release 3.2.1 (2021-02-04) + +### Changed + +- Upgraded `Masterminds/goutils` to `v1.1.1`. 
see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr) + ## Release 3.2.0 (2020-12-14) ### Added diff --git a/vendor/github.com/go-task/slim-sprig/LICENSE.txt b/vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt similarity index 100% rename from vendor/github.com/go-task/slim-sprig/LICENSE.txt rename to vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt diff --git a/vendor/github.com/go-task/slim-sprig/README.md b/vendor/github.com/go-task/slim-sprig/v3/README.md similarity index 88% rename from vendor/github.com/go-task/slim-sprig/README.md rename to vendor/github.com/go-task/slim-sprig/v3/README.md index 72579471ff0..b5ab564254f 100644 --- a/vendor/github.com/go-task/slim-sprig/README.md +++ b/vendor/github.com/go-task/slim-sprig/v3/README.md @@ -1,4 +1,4 @@ -# Slim-Sprig: Template functions for Go templates [![GoDoc](https://godoc.org/github.com/go-task/slim-sprig?status.svg)](https://godoc.org/github.com/go-task/slim-sprig) [![Go Report Card](https://goreportcard.com/badge/github.com/go-task/slim-sprig)](https://goreportcard.com/report/github.com/go-task/slim-sprig) +# Slim-Sprig: Template functions for Go templates [![Go Reference](https://pkg.go.dev/badge/github.com/go-task/slim-sprig/v3.svg)](https://pkg.go.dev/github.com/go-task/slim-sprig/v3) Slim-Sprig is a fork of [Sprig](https://github.com/Masterminds/sprig), but with all functions that depend on external (non standard library) or crypto packages diff --git a/vendor/github.com/go-task/slim-sprig/Taskfile.yml b/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml similarity index 89% rename from vendor/github.com/go-task/slim-sprig/Taskfile.yml rename to vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml index cdcfd223b71..8e6346bb19e 100644 --- a/vendor/github.com/go-task/slim-sprig/Taskfile.yml +++ b/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml @@ -1,6 +1,6 @@ # https://taskfile.dev -version: '2' +version: '3' tasks: default: diff --git a/vendor/github.com/go-task/slim-sprig/crypto.go b/vendor/github.com/go-task/slim-sprig/v3/crypto.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/crypto.go rename to vendor/github.com/go-task/slim-sprig/v3/crypto.go diff --git a/vendor/github.com/go-task/slim-sprig/date.go b/vendor/github.com/go-task/slim-sprig/v3/date.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/date.go rename to vendor/github.com/go-task/slim-sprig/v3/date.go diff --git a/vendor/github.com/go-task/slim-sprig/defaults.go b/vendor/github.com/go-task/slim-sprig/v3/defaults.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/defaults.go rename to vendor/github.com/go-task/slim-sprig/v3/defaults.go diff --git a/vendor/github.com/go-task/slim-sprig/dict.go b/vendor/github.com/go-task/slim-sprig/v3/dict.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/dict.go rename to vendor/github.com/go-task/slim-sprig/v3/dict.go diff --git a/vendor/github.com/go-task/slim-sprig/doc.go b/vendor/github.com/go-task/slim-sprig/v3/doc.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/doc.go rename to vendor/github.com/go-task/slim-sprig/v3/doc.go diff --git a/vendor/github.com/go-task/slim-sprig/functions.go b/vendor/github.com/go-task/slim-sprig/v3/functions.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/functions.go rename to vendor/github.com/go-task/slim-sprig/v3/functions.go diff --git a/vendor/github.com/go-task/slim-sprig/list.go 
b/vendor/github.com/go-task/slim-sprig/v3/list.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/list.go rename to vendor/github.com/go-task/slim-sprig/v3/list.go diff --git a/vendor/github.com/go-task/slim-sprig/network.go b/vendor/github.com/go-task/slim-sprig/v3/network.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/network.go rename to vendor/github.com/go-task/slim-sprig/v3/network.go diff --git a/vendor/github.com/go-task/slim-sprig/numeric.go b/vendor/github.com/go-task/slim-sprig/v3/numeric.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/numeric.go rename to vendor/github.com/go-task/slim-sprig/v3/numeric.go diff --git a/vendor/github.com/go-task/slim-sprig/reflect.go b/vendor/github.com/go-task/slim-sprig/v3/reflect.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/reflect.go rename to vendor/github.com/go-task/slim-sprig/v3/reflect.go diff --git a/vendor/github.com/go-task/slim-sprig/regex.go b/vendor/github.com/go-task/slim-sprig/v3/regex.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/regex.go rename to vendor/github.com/go-task/slim-sprig/v3/regex.go diff --git a/vendor/github.com/go-task/slim-sprig/strings.go b/vendor/github.com/go-task/slim-sprig/v3/strings.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/strings.go rename to vendor/github.com/go-task/slim-sprig/v3/strings.go diff --git a/vendor/github.com/go-task/slim-sprig/url.go b/vendor/github.com/go-task/slim-sprig/v3/url.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/url.go rename to vendor/github.com/go-task/slim-sprig/v3/url.go diff --git a/vendor/github.com/google/go-github/v60/AUTHORS b/vendor/github.com/google/go-github/v62/AUTHORS similarity index 99% rename from vendor/github.com/google/go-github/v60/AUTHORS rename to vendor/github.com/google/go-github/v62/AUTHORS index 74a21dc604e..0197b94a23a 100644 --- a/vendor/github.com/google/go-github/v60/AUTHORS +++ b/vendor/github.com/google/go-github/v62/AUTHORS @@ -303,6 +303,7 @@ Matt Gaunt Matt Landis Matt Moore Matt Simons +Matthew Reidy Maxime Bury Michael Meng Michael Spiegel @@ -484,4 +485,4 @@ Zach Latta zhouhaibing089 六开箱 缘生 -蒋航 +蒋航 \ No newline at end of file diff --git a/vendor/github.com/google/go-github/v60/LICENSE b/vendor/github.com/google/go-github/v62/LICENSE similarity index 100% rename from vendor/github.com/google/go-github/v60/LICENSE rename to vendor/github.com/google/go-github/v62/LICENSE diff --git a/vendor/github.com/google/go-github/v60/github/actions.go b/vendor/github.com/google/go-github/v62/github/actions.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/actions.go rename to vendor/github.com/google/go-github/v62/github/actions.go diff --git a/vendor/github.com/google/go-github/v60/github/actions_artifacts.go b/vendor/github.com/google/go-github/v62/github/actions_artifacts.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/actions_artifacts.go rename to vendor/github.com/google/go-github/v62/github/actions_artifacts.go diff --git a/vendor/github.com/google/go-github/v60/github/actions_cache.go b/vendor/github.com/google/go-github/v62/github/actions_cache.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/actions_cache.go rename to vendor/github.com/google/go-github/v62/github/actions_cache.go diff --git a/vendor/github.com/google/go-github/v60/github/actions_oidc.go 
b/vendor/github.com/google/go-github/v62/github/actions_oidc.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/actions_oidc.go rename to vendor/github.com/google/go-github/v62/github/actions_oidc.go diff --git a/vendor/github.com/google/go-github/v60/github/actions_permissions_enterprise.go b/vendor/github.com/google/go-github/v62/github/actions_permissions_enterprise.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/actions_permissions_enterprise.go rename to vendor/github.com/google/go-github/v62/github/actions_permissions_enterprise.go diff --git a/vendor/github.com/google/go-github/v60/github/actions_permissions_orgs.go b/vendor/github.com/google/go-github/v62/github/actions_permissions_orgs.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/actions_permissions_orgs.go rename to vendor/github.com/google/go-github/v62/github/actions_permissions_orgs.go diff --git a/vendor/github.com/google/go-github/v60/github/actions_required_workflows.go b/vendor/github.com/google/go-github/v62/github/actions_required_workflows.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/actions_required_workflows.go rename to vendor/github.com/google/go-github/v62/github/actions_required_workflows.go diff --git a/vendor/github.com/google/go-github/v60/github/actions_runner_groups.go b/vendor/github.com/google/go-github/v62/github/actions_runner_groups.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/actions_runner_groups.go rename to vendor/github.com/google/go-github/v62/github/actions_runner_groups.go diff --git a/vendor/github.com/google/go-github/v60/github/actions_runners.go b/vendor/github.com/google/go-github/v62/github/actions_runners.go similarity index 97% rename from vendor/github.com/google/go-github/v60/github/actions_runners.go rename to vendor/github.com/google/go-github/v62/github/actions_runners.go index c5c90279df5..c4ae48232ec 100644 --- a/vendor/github.com/google/go-github/v60/github/actions_runners.go +++ b/vendor/github.com/google/go-github/v62/github/actions_runners.go @@ -151,12 +151,18 @@ type Runners struct { Runners []*Runner `json:"runners"` } +// ListRunnersOptions specifies the optional parameters to the ListRunners and ListOrganizationRunners methods. +type ListRunnersOptions struct { + Name *string `url:"name,omitempty"` + ListOptions +} + // ListRunners lists all the self-hosted runners for a repository. 
// // GitHub API docs: https://docs.github.com/rest/actions/self-hosted-runners#list-self-hosted-runners-for-a-repository // //meta:operation GET /repos/{owner}/{repo}/actions/runners -func (s *ActionsService) ListRunners(ctx context.Context, owner, repo string, opts *ListOptions) (*Runners, *Response, error) { +func (s *ActionsService) ListRunners(ctx context.Context, owner, repo string, opts *ListRunnersOptions) (*Runners, *Response, error) { u := fmt.Sprintf("repos/%v/%v/actions/runners", owner, repo) u, err := addOptions(u, opts) if err != nil { @@ -290,7 +296,7 @@ func (s *ActionsService) CreateOrganizationRegistrationToken(ctx context.Context // GitHub API docs: https://docs.github.com/rest/actions/self-hosted-runners#list-self-hosted-runners-for-an-organization // //meta:operation GET /orgs/{org}/actions/runners -func (s *ActionsService) ListOrganizationRunners(ctx context.Context, org string, opts *ListOptions) (*Runners, *Response, error) { +func (s *ActionsService) ListOrganizationRunners(ctx context.Context, org string, opts *ListRunnersOptions) (*Runners, *Response, error) { u := fmt.Sprintf("orgs/%v/actions/runners", org) u, err := addOptions(u, opts) if err != nil { diff --git a/vendor/github.com/google/go-github/v60/github/actions_secrets.go b/vendor/github.com/google/go-github/v62/github/actions_secrets.go similarity index 96% rename from vendor/github.com/google/go-github/v60/github/actions_secrets.go rename to vendor/github.com/google/go-github/v62/github/actions_secrets.go index d8d405c06d7..ec519838eab 100644 --- a/vendor/github.com/google/go-github/v60/github/actions_secrets.go +++ b/vendor/github.com/google/go-github/v62/github/actions_secrets.go @@ -84,7 +84,7 @@ func (s *ActionsService) GetOrgPublicKey(ctx context.Context, org string) (*Publ // GetEnvPublicKey gets a public key that should be used for secret encryption. // -// GitHub API docs: https://docs.github.com/rest/actions/secrets#get-an-environment-public-key +// GitHub API docs: https://docs.github.com/enterprise-server@3.7/rest/actions/secrets#get-an-environment-public-key // //meta:operation GET /repositories/{repository_id}/environments/{environment_name}/secrets/public-key func (s *ActionsService) GetEnvPublicKey(ctx context.Context, repoID int, env string) (*PublicKey, *Response, error) { @@ -162,7 +162,7 @@ func (s *ActionsService) ListOrgSecrets(ctx context.Context, org string, opts *L // ListEnvSecrets lists all secrets available in an environment. // -// GitHub API docs: https://docs.github.com/rest/actions/secrets#list-environment-secrets +// GitHub API docs: https://docs.github.com/enterprise-server@3.7/rest/actions/secrets#list-environment-secrets // //meta:operation GET /repositories/{repository_id}/environments/{environment_name}/secrets func (s *ActionsService) ListEnvSecrets(ctx context.Context, repoID int, env string, opts *ListOptions) (*Secrets, *Response, error) { @@ -207,7 +207,7 @@ func (s *ActionsService) GetOrgSecret(ctx context.Context, org, name string) (*S // GetEnvSecret gets a single environment secret without revealing its encrypted value. 
// -// GitHub API docs: https://docs.github.com/rest/actions/secrets#get-an-environment-secret +// GitHub API docs: https://docs.github.com/enterprise-server@3.7/rest/actions/secrets#get-an-environment-secret // //meta:operation GET /repositories/{repository_id}/environments/{environment_name}/secrets/{secret_name} func (s *ActionsService) GetEnvSecret(ctx context.Context, repoID int, env, secretName string) (*Secret, *Response, error) { @@ -262,7 +262,7 @@ func (s *ActionsService) CreateOrUpdateOrgSecret(ctx context.Context, org string // CreateOrUpdateEnvSecret creates or updates a single environment secret with an encrypted value. // -// GitHub API docs: https://docs.github.com/rest/actions/secrets#create-or-update-an-environment-secret +// GitHub API docs: https://docs.github.com/enterprise-server@3.7/rest/actions/secrets#create-or-update-an-environment-secret // //meta:operation PUT /repositories/{repository_id}/environments/{environment_name}/secrets/{secret_name} func (s *ActionsService) CreateOrUpdateEnvSecret(ctx context.Context, repoID int, env string, eSecret *EncryptedSecret) (*Response, error) { @@ -301,7 +301,7 @@ func (s *ActionsService) DeleteOrgSecret(ctx context.Context, org, name string) // DeleteEnvSecret deletes a secret in an environment using the secret name. // -// GitHub API docs: https://docs.github.com/rest/actions/secrets#delete-an-environment-secret +// GitHub API docs: https://docs.github.com/enterprise-server@3.7/rest/actions/secrets#delete-an-environment-secret // //meta:operation DELETE /repositories/{repository_id}/environments/{environment_name}/secrets/{secret_name} func (s *ActionsService) DeleteEnvSecret(ctx context.Context, repoID int, env, secretName string) (*Response, error) { diff --git a/vendor/github.com/google/go-github/v60/github/actions_variables.go b/vendor/github.com/google/go-github/v62/github/actions_variables.go similarity index 89% rename from vendor/github.com/google/go-github/v60/github/actions_variables.go rename to vendor/github.com/google/go-github/v62/github/actions_variables.go index aa0f23ca48d..bca46b6df6b 100644 --- a/vendor/github.com/google/go-github/v60/github/actions_variables.go +++ b/vendor/github.com/google/go-github/v62/github/actions_variables.go @@ -83,9 +83,9 @@ func (s *ActionsService) ListOrgVariables(ctx context.Context, org string, opts // // GitHub API docs: https://docs.github.com/rest/actions/variables#list-environment-variables // -//meta:operation GET /repositories/{repository_id}/environments/{environment_name}/variables -func (s *ActionsService) ListEnvVariables(ctx context.Context, repoID int, env string, opts *ListOptions) (*ActionsVariables, *Response, error) { - url := fmt.Sprintf("repositories/%v/environments/%v/variables", repoID, env) +//meta:operation GET /repos/{owner}/{repo}/environments/{environment_name}/variables +func (s *ActionsService) ListEnvVariables(ctx context.Context, owner, repo, env string, opts *ListOptions) (*ActionsVariables, *Response, error) { + url := fmt.Sprintf("repos/%v/%v/environments/%v/variables", owner, repo, env) return s.listVariables(ctx, url, opts) } @@ -128,9 +128,9 @@ func (s *ActionsService) GetOrgVariable(ctx context.Context, org, name string) ( // // GitHub API docs: https://docs.github.com/rest/actions/variables#get-an-environment-variable // -//meta:operation GET /repositories/{repository_id}/environments/{environment_name}/variables/{name} -func (s *ActionsService) GetEnvVariable(ctx context.Context, repoID int, env, variableName string) (*ActionsVariable, 
*Response, error) { - url := fmt.Sprintf("repositories/%v/environments/%v/variables/%v", repoID, env, variableName) +//meta:operation GET /repos/{owner}/{repo}/environments/{environment_name}/variables/{name} +func (s *ActionsService) GetEnvVariable(ctx context.Context, owner, repo, env, variableName string) (*ActionsVariable, *Response, error) { + url := fmt.Sprintf("repos/%v/%v/environments/%v/variables/%v", owner, repo, env, variableName) return s.getVariable(ctx, url) } @@ -166,9 +166,9 @@ func (s *ActionsService) CreateOrgVariable(ctx context.Context, org string, vari // // GitHub API docs: https://docs.github.com/rest/actions/variables#create-an-environment-variable // -//meta:operation POST /repositories/{repository_id}/environments/{environment_name}/variables -func (s *ActionsService) CreateEnvVariable(ctx context.Context, repoID int, env string, variable *ActionsVariable) (*Response, error) { - url := fmt.Sprintf("repositories/%v/environments/%v/variables", repoID, env) +//meta:operation POST /repos/{owner}/{repo}/environments/{environment_name}/variables +func (s *ActionsService) CreateEnvVariable(ctx context.Context, owner, repo, env string, variable *ActionsVariable) (*Response, error) { + url := fmt.Sprintf("repos/%v/%v/environments/%v/variables", owner, repo, env) return s.postVariable(ctx, url, variable) } @@ -204,9 +204,9 @@ func (s *ActionsService) UpdateOrgVariable(ctx context.Context, org string, vari // // GitHub API docs: https://docs.github.com/rest/actions/variables#update-an-environment-variable // -//meta:operation PATCH /repositories/{repository_id}/environments/{environment_name}/variables/{name} -func (s *ActionsService) UpdateEnvVariable(ctx context.Context, repoID int, env string, variable *ActionsVariable) (*Response, error) { - url := fmt.Sprintf("repositories/%v/environments/%v/variables/%v", repoID, env, variable.Name) +//meta:operation PATCH /repos/{owner}/{repo}/environments/{environment_name}/variables/{name} +func (s *ActionsService) UpdateEnvVariable(ctx context.Context, owner, repo, env string, variable *ActionsVariable) (*Response, error) { + url := fmt.Sprintf("repos/%v/%v/environments/%v/variables/%v", owner, repo, env, variable.Name) return s.patchVariable(ctx, url, variable) } @@ -243,9 +243,9 @@ func (s *ActionsService) DeleteOrgVariable(ctx context.Context, org, name string // // GitHub API docs: https://docs.github.com/rest/actions/variables#delete-an-environment-variable // -//meta:operation DELETE /repositories/{repository_id}/environments/{environment_name}/variables/{name} -func (s *ActionsService) DeleteEnvVariable(ctx context.Context, repoID int, env, variableName string) (*Response, error) { - url := fmt.Sprintf("repositories/%v/environments/%v/variables/%v", repoID, env, variableName) +//meta:operation DELETE /repos/{owner}/{repo}/environments/{environment_name}/variables/{name} +func (s *ActionsService) DeleteEnvVariable(ctx context.Context, owner, repo, env, variableName string) (*Response, error) { + url := fmt.Sprintf("repos/%v/%v/environments/%v/variables/%v", owner, repo, env, variableName) return s.deleteVariable(ctx, url) } diff --git a/vendor/github.com/google/go-github/v60/github/actions_workflow_jobs.go b/vendor/github.com/google/go-github/v62/github/actions_workflow_jobs.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/actions_workflow_jobs.go rename to vendor/github.com/google/go-github/v62/github/actions_workflow_jobs.go diff --git 
a/vendor/github.com/google/go-github/v60/github/actions_workflow_runs.go b/vendor/github.com/google/go-github/v62/github/actions_workflow_runs.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/actions_workflow_runs.go rename to vendor/github.com/google/go-github/v62/github/actions_workflow_runs.go diff --git a/vendor/github.com/google/go-github/v60/github/actions_workflows.go b/vendor/github.com/google/go-github/v62/github/actions_workflows.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/actions_workflows.go rename to vendor/github.com/google/go-github/v62/github/actions_workflows.go diff --git a/vendor/github.com/google/go-github/v60/github/activity.go b/vendor/github.com/google/go-github/v62/github/activity.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/activity.go rename to vendor/github.com/google/go-github/v62/github/activity.go diff --git a/vendor/github.com/google/go-github/v60/github/activity_events.go b/vendor/github.com/google/go-github/v62/github/activity_events.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/activity_events.go rename to vendor/github.com/google/go-github/v62/github/activity_events.go diff --git a/vendor/github.com/google/go-github/v60/github/activity_notifications.go b/vendor/github.com/google/go-github/v62/github/activity_notifications.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/activity_notifications.go rename to vendor/github.com/google/go-github/v62/github/activity_notifications.go diff --git a/vendor/github.com/google/go-github/v60/github/activity_star.go b/vendor/github.com/google/go-github/v62/github/activity_star.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/activity_star.go rename to vendor/github.com/google/go-github/v62/github/activity_star.go diff --git a/vendor/github.com/google/go-github/v60/github/activity_watching.go b/vendor/github.com/google/go-github/v62/github/activity_watching.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/activity_watching.go rename to vendor/github.com/google/go-github/v62/github/activity_watching.go diff --git a/vendor/github.com/google/go-github/v60/github/admin.go b/vendor/github.com/google/go-github/v62/github/admin.go similarity index 97% rename from vendor/github.com/google/go-github/v60/github/admin.go rename to vendor/github.com/google/go-github/v62/github/admin.go index c1f7066c7ad..e93c2266bd1 100644 --- a/vendor/github.com/google/go-github/v60/github/admin.go +++ b/vendor/github.com/google/go-github/v62/github/admin.go @@ -82,7 +82,7 @@ func (m Enterprise) String() string { // UpdateUserLDAPMapping updates the mapping between a GitHub user and an LDAP user. // -// GitHub API docs: https://docs.github.com/enterprise-server@3.11/rest/enterprise-admin/ldap#update-ldap-mapping-for-a-user +// GitHub API docs: https://docs.github.com/enterprise-server@3.12/rest/enterprise-admin/ldap#update-ldap-mapping-for-a-user // //meta:operation PATCH /admin/ldap/users/{username}/mapping func (s *AdminService) UpdateUserLDAPMapping(ctx context.Context, user string, mapping *UserLDAPMapping) (*UserLDAPMapping, *Response, error) { @@ -103,7 +103,7 @@ func (s *AdminService) UpdateUserLDAPMapping(ctx context.Context, user string, m // UpdateTeamLDAPMapping updates the mapping between a GitHub team and an LDAP group. 
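The actions_variables.go hunks earlier in this patch change the environment-level variable methods from a numeric repository ID to an owner/repo pair, matching the /repos/{owner}/{repo}/environments/... routes. A minimal sketch of calling the new v62 signature; the token, owner, repo, and environment names are placeholders, and the ActionsVariables/ActionsVariable field names are assumed from the v62 types (not shown in this hunk):

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/google/go-github/v62/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil).WithAuthToken(os.Getenv("GITHUB_TOKEN"))

	// v62: owner/repo strings replace the numeric repository ID used in v60.
	vars, _, err := client.Actions.ListEnvVariables(ctx, "example-owner", "example-repo", "production", &github.ListOptions{PerPage: 30})
	if err != nil {
		log.Fatal(err)
	}
	for _, v := range vars.Variables {
		fmt.Println(v.Name)
	}
}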
// -// GitHub API docs: https://docs.github.com/enterprise-server@3.11/rest/enterprise-admin/ldap#update-ldap-mapping-for-a-team +// GitHub API docs: https://docs.github.com/enterprise-server@3.12/rest/enterprise-admin/ldap#update-ldap-mapping-for-a-team // //meta:operation PATCH /admin/ldap/teams/{team_id}/mapping func (s *AdminService) UpdateTeamLDAPMapping(ctx context.Context, team int64, mapping *TeamLDAPMapping) (*TeamLDAPMapping, *Response, error) { diff --git a/vendor/github.com/google/go-github/v60/github/admin_orgs.go b/vendor/github.com/google/go-github/v62/github/admin_orgs.go similarity index 93% rename from vendor/github.com/google/go-github/v60/github/admin_orgs.go rename to vendor/github.com/google/go-github/v62/github/admin_orgs.go index e1b3641e003..cb11fe47f42 100644 --- a/vendor/github.com/google/go-github/v60/github/admin_orgs.go +++ b/vendor/github.com/google/go-github/v62/github/admin_orgs.go @@ -22,7 +22,7 @@ type createOrgRequest struct { // Note that only a subset of the org fields are used and org must // not be nil. // -// GitHub API docs: https://docs.github.com/enterprise-server@3.11/rest/enterprise-admin/orgs#create-an-organization +// GitHub API docs: https://docs.github.com/enterprise-server@3.12/rest/enterprise-admin/orgs#create-an-organization // //meta:operation POST /admin/organizations func (s *AdminService) CreateOrg(ctx context.Context, org *Organization, admin string) (*Organization, *Response, error) { @@ -61,7 +61,7 @@ type RenameOrgResponse struct { // RenameOrg renames an organization in GitHub Enterprise. // -// GitHub API docs: https://docs.github.com/enterprise-server@3.11/rest/enterprise-admin/orgs#update-an-organization-name +// GitHub API docs: https://docs.github.com/enterprise-server@3.12/rest/enterprise-admin/orgs#update-an-organization-name // //meta:operation PATCH /admin/organizations/{org} func (s *AdminService) RenameOrg(ctx context.Context, org *Organization, newName string) (*RenameOrgResponse, *Response, error) { @@ -70,7 +70,7 @@ func (s *AdminService) RenameOrg(ctx context.Context, org *Organization, newName // RenameOrgByName renames an organization in GitHub Enterprise using its current name. // -// GitHub API docs: https://docs.github.com/enterprise-server@3.11/rest/enterprise-admin/orgs#update-an-organization-name +// GitHub API docs: https://docs.github.com/enterprise-server@3.12/rest/enterprise-admin/orgs#update-an-organization-name // //meta:operation PATCH /admin/organizations/{org} func (s *AdminService) RenameOrgByName(ctx context.Context, org, newName string) (*RenameOrgResponse, *Response, error) { diff --git a/vendor/github.com/google/go-github/v60/github/admin_stats.go b/vendor/github.com/google/go-github/v62/github/admin_stats.go similarity index 98% rename from vendor/github.com/google/go-github/v60/github/admin_stats.go rename to vendor/github.com/google/go-github/v62/github/admin_stats.go index f26b393e02f..70412625147 100644 --- a/vendor/github.com/google/go-github/v60/github/admin_stats.go +++ b/vendor/github.com/google/go-github/v62/github/admin_stats.go @@ -152,7 +152,7 @@ func (s RepoStats) String() string { // Please note that this is only available to site administrators, // otherwise it will error with a 404 not found (instead of 401 or 403). 
// -// GitHub API docs: https://docs.github.com/enterprise-server@3.11/rest/enterprise-admin/admin-stats#get-all-statistics +// GitHub API docs: https://docs.github.com/enterprise-server@3.12/rest/enterprise-admin/admin-stats#get-all-statistics // //meta:operation GET /enterprise/stats/all func (s *AdminService) GetAdminStats(ctx context.Context) (*AdminStats, *Response, error) { diff --git a/vendor/github.com/google/go-github/v60/github/admin_users.go b/vendor/github.com/google/go-github/v62/github/admin_users.go similarity index 94% rename from vendor/github.com/google/go-github/v60/github/admin_users.go rename to vendor/github.com/google/go-github/v62/github/admin_users.go index e134ff34dbf..82e568a0ac6 100644 --- a/vendor/github.com/google/go-github/v60/github/admin_users.go +++ b/vendor/github.com/google/go-github/v62/github/admin_users.go @@ -20,7 +20,7 @@ type CreateUserRequest struct { // CreateUser creates a new user in GitHub Enterprise. // -// GitHub API docs: https://docs.github.com/enterprise-server@3.11/rest/enterprise-admin/users#create-a-user +// GitHub API docs: https://docs.github.com/enterprise-server@3.12/rest/enterprise-admin/users#create-a-user // //meta:operation POST /admin/users func (s *AdminService) CreateUser(ctx context.Context, userReq CreateUserRequest) (*User, *Response, error) { @@ -42,7 +42,7 @@ func (s *AdminService) CreateUser(ctx context.Context, userReq CreateUserRequest // DeleteUser deletes a user in GitHub Enterprise. // -// GitHub API docs: https://docs.github.com/enterprise-server@3.11/rest/enterprise-admin/users#delete-a-user +// GitHub API docs: https://docs.github.com/enterprise-server@3.12/rest/enterprise-admin/users#delete-a-user // //meta:operation DELETE /admin/users/{username} func (s *AdminService) DeleteUser(ctx context.Context, username string) (*Response, error) { @@ -95,7 +95,7 @@ type UserAuthorization struct { // CreateUserImpersonation creates an impersonation OAuth token. // -// GitHub API docs: https://docs.github.com/enterprise-server@3.11/rest/enterprise-admin/users#create-an-impersonation-oauth-token +// GitHub API docs: https://docs.github.com/enterprise-server@3.12/rest/enterprise-admin/users#create-an-impersonation-oauth-token // //meta:operation POST /admin/users/{username}/authorizations func (s *AdminService) CreateUserImpersonation(ctx context.Context, username string, opts *ImpersonateUserOptions) (*UserAuthorization, *Response, error) { @@ -117,7 +117,7 @@ func (s *AdminService) CreateUserImpersonation(ctx context.Context, username str // DeleteUserImpersonation deletes an impersonation OAuth token. 
// -// GitHub API docs: https://docs.github.com/enterprise-server@3.11/rest/enterprise-admin/users#delete-an-impersonation-oauth-token +// GitHub API docs: https://docs.github.com/enterprise-server@3.12/rest/enterprise-admin/users#delete-an-impersonation-oauth-token // //meta:operation DELETE /admin/users/{username}/authorizations func (s *AdminService) DeleteUserImpersonation(ctx context.Context, username string) (*Response, error) { diff --git a/vendor/github.com/google/go-github/v60/github/apps.go b/vendor/github.com/google/go-github/v62/github/apps.go similarity index 90% rename from vendor/github.com/google/go-github/v60/github/apps.go rename to vendor/github.com/google/go-github/v62/github/apps.go index f0392f2d706..6969daba6c0 100644 --- a/vendor/github.com/google/go-github/v60/github/apps.go +++ b/vendor/github.com/google/go-github/v62/github/apps.go @@ -56,6 +56,20 @@ type InstallationTokenOptions struct { Permissions *InstallationPermissions `json:"permissions,omitempty"` } +type InstallationTokenListRepoOptions struct { + // The IDs of the repositories that the installation token can access. + // Providing repository IDs restricts the access of an installation token to specific repositories. + RepositoryIDs []int64 `json:"repository_ids"` + + // The names of the repositories that the installation token can access. + // Providing repository names restricts the access of an installation token to specific repositories. + Repositories []string `json:"repositories,omitempty"` + + // The permissions granted to the access token. + // The permissions object includes the permission names and their access type. + Permissions *InstallationPermissions `json:"permissions,omitempty"` +} + // InstallationPermissions lists the repository and organization permissions for an installation. // // Permission names taken from: @@ -77,6 +91,7 @@ type InstallationPermissions struct { Metadata *string `json:"metadata,omitempty"` Members *string `json:"members,omitempty"` OrganizationAdministration *string `json:"organization_administration,omitempty"` + OrganizationCustomProperties *string `json:"organization_custom_properties,omitempty"` OrganizationCustomRoles *string `json:"organization_custom_roles,omitempty"` OrganizationHooks *string `json:"organization_hooks,omitempty"` OrganizationPackages *string `json:"organization_packages,omitempty"` @@ -343,6 +358,30 @@ func (s *AppsService) CreateInstallationToken(ctx context.Context, id int64, opt return t, resp, nil } +// CreateInstallationTokenListRepos creates a new installation token with a list of all repositories in an installation which is not possible with CreateInstallationToken. +// +// It differs from CreateInstallationToken by taking InstallationTokenListRepoOptions as a parameter which does not omit RepositoryIDs if that field is nil or an empty array. 
+// +// GitHub API docs: https://docs.github.com/rest/apps/apps#create-an-installation-access-token-for-an-app +// +//meta:operation POST /app/installations/{installation_id}/access_tokens +func (s *AppsService) CreateInstallationTokenListRepos(ctx context.Context, id int64, opts *InstallationTokenListRepoOptions) (*InstallationToken, *Response, error) { + u := fmt.Sprintf("app/installations/%v/access_tokens", id) + + req, err := s.client.NewRequest("POST", u, opts) + if err != nil { + return nil, nil, err + } + + t := new(InstallationToken) + resp, err := s.client.Do(ctx, req, t) + if err != nil { + return nil, resp, err + } + + return t, resp, nil +} + // CreateAttachment creates a new attachment on user comment containing a url. // // GitHub API docs: https://docs.github.com/enterprise-server@3.3/rest/reference/apps#create-a-content-attachment diff --git a/vendor/github.com/google/go-github/v60/github/apps_hooks.go b/vendor/github.com/google/go-github/v62/github/apps_hooks.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/apps_hooks.go rename to vendor/github.com/google/go-github/v62/github/apps_hooks.go diff --git a/vendor/github.com/google/go-github/v60/github/apps_hooks_deliveries.go b/vendor/github.com/google/go-github/v62/github/apps_hooks_deliveries.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/apps_hooks_deliveries.go rename to vendor/github.com/google/go-github/v62/github/apps_hooks_deliveries.go diff --git a/vendor/github.com/google/go-github/v60/github/apps_installation.go b/vendor/github.com/google/go-github/v62/github/apps_installation.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/apps_installation.go rename to vendor/github.com/google/go-github/v62/github/apps_installation.go diff --git a/vendor/github.com/google/go-github/v60/github/apps_manifest.go b/vendor/github.com/google/go-github/v62/github/apps_manifest.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/apps_manifest.go rename to vendor/github.com/google/go-github/v62/github/apps_manifest.go diff --git a/vendor/github.com/google/go-github/v60/github/apps_marketplace.go b/vendor/github.com/google/go-github/v62/github/apps_marketplace.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/apps_marketplace.go rename to vendor/github.com/google/go-github/v62/github/apps_marketplace.go diff --git a/vendor/github.com/google/go-github/v60/github/authorizations.go b/vendor/github.com/google/go-github/v62/github/authorizations.go similarity index 98% rename from vendor/github.com/google/go-github/v60/github/authorizations.go rename to vendor/github.com/google/go-github/v62/github/authorizations.go index 931b77299b0..9bfff7330ab 100644 --- a/vendor/github.com/google/go-github/v60/github/authorizations.go +++ b/vendor/github.com/google/go-github/v62/github/authorizations.go @@ -257,7 +257,7 @@ func (s *AuthorizationsService) DeleteGrant(ctx context.Context, clientID, acces // you can e.g. create or delete a user's public SSH key. NOTE: creating a // new token automatically revokes an existing one. 
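The apps.go hunk above introduces InstallationTokenListRepoOptions and CreateInstallationTokenListRepos, whose point is that RepositoryIDs is not omitted when nil or empty, so an installation token can be scoped to an explicit (possibly empty) repository list. A sketch of how a caller might use it; the installation and repository IDs are placeholders, and the GitHub App (JWT) authentication is assumed to be configured elsewhere:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v62/github"
)

// issueScopedToken requests an installation token limited to the given
// repository IDs. client must be authenticated as a GitHub App (JWT),
// e.g. via an app transport; that setup is assumed here.
func issueScopedToken(ctx context.Context, client *github.Client, installationID int64, repoIDs []int64) (string, error) {
	opts := &github.InstallationTokenListRepoOptions{
		// Unlike InstallationTokenOptions, an empty slice is sent as-is
		// instead of being omitted from the request body.
		RepositoryIDs: repoIDs,
	}
	token, _, err := client.Apps.CreateInstallationTokenListRepos(ctx, installationID, opts)
	if err != nil {
		return "", err
	}
	return token.GetToken(), nil
}

func main() {
	// Placeholder values for illustration only.
	client := github.NewClient(nil) // needs app (JWT) auth in practice
	tok, err := issueScopedToken(context.Background(), client, 12345, []int64{67890})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("token length:", len(tok))
}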
// -// GitHub API docs: https://docs.github.com/enterprise-server@3.11/rest/enterprise-admin/users#create-an-impersonation-oauth-token +// GitHub API docs: https://docs.github.com/enterprise-server@3.12/rest/enterprise-admin/users#create-an-impersonation-oauth-token // //meta:operation POST /admin/users/{username}/authorizations func (s *AuthorizationsService) CreateImpersonation(ctx context.Context, username string, authReq *AuthorizationRequest) (*Authorization, *Response, error) { @@ -279,7 +279,7 @@ func (s *AuthorizationsService) CreateImpersonation(ctx context.Context, usernam // // NOTE: there can be only one at a time. // -// GitHub API docs: https://docs.github.com/enterprise-server@3.11/rest/enterprise-admin/users#delete-an-impersonation-oauth-token +// GitHub API docs: https://docs.github.com/enterprise-server@3.12/rest/enterprise-admin/users#delete-an-impersonation-oauth-token // //meta:operation DELETE /admin/users/{username}/authorizations func (s *AuthorizationsService) DeleteImpersonation(ctx context.Context, username string) (*Response, error) { diff --git a/vendor/github.com/google/go-github/v60/github/billing.go b/vendor/github.com/google/go-github/v62/github/billing.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/billing.go rename to vendor/github.com/google/go-github/v62/github/billing.go diff --git a/vendor/github.com/google/go-github/v60/github/checks.go b/vendor/github.com/google/go-github/v62/github/checks.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/checks.go rename to vendor/github.com/google/go-github/v62/github/checks.go diff --git a/vendor/github.com/google/go-github/v60/github/code-scanning.go b/vendor/github.com/google/go-github/v62/github/code-scanning.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/code-scanning.go rename to vendor/github.com/google/go-github/v62/github/code-scanning.go diff --git a/vendor/github.com/google/go-github/v60/github/codesofconduct.go b/vendor/github.com/google/go-github/v62/github/codesofconduct.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/codesofconduct.go rename to vendor/github.com/google/go-github/v62/github/codesofconduct.go diff --git a/vendor/github.com/google/go-github/v60/github/codespaces.go b/vendor/github.com/google/go-github/v62/github/codespaces.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/codespaces.go rename to vendor/github.com/google/go-github/v62/github/codespaces.go diff --git a/vendor/github.com/google/go-github/v60/github/codespaces_secrets.go b/vendor/github.com/google/go-github/v62/github/codespaces_secrets.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/codespaces_secrets.go rename to vendor/github.com/google/go-github/v62/github/codespaces_secrets.go diff --git a/vendor/github.com/google/go-github/v60/github/copilot.go b/vendor/github.com/google/go-github/v62/github/copilot.go similarity index 93% rename from vendor/github.com/google/go-github/v60/github/copilot.go rename to vendor/github.com/google/go-github/v62/github/copilot.go index 51c938c9748..2697b71850c 100644 --- a/vendor/github.com/google/go-github/v60/github/copilot.go +++ b/vendor/github.com/google/go-github/v62/github/copilot.go @@ -132,7 +132,7 @@ func (cp *CopilotSeatDetails) GetOrganization() (*Organization, bool) { // GetCopilotBilling gets Copilot for Business billing information and settings for an organization. 
// -// GitHub API docs: https://docs.github.com/rest/copilot/copilot-business#get-copilot-business-seat-information-and-settings-for-an-organization +// GitHub API docs: https://docs.github.com/rest/copilot/copilot-user-management#get-copilot-seat-information-and-settings-for-an-organization // //meta:operation GET /orgs/{org}/copilot/billing func (s *CopilotService) GetCopilotBilling(ctx context.Context, org string) (*CopilotOrganizationDetails, *Response, error) { @@ -156,13 +156,17 @@ func (s *CopilotService) GetCopilotBilling(ctx context.Context, org string) (*Co // // To paginate through all seats, populate 'Page' with the number of the last page. // -// GitHub API docs: https://docs.github.com/rest/copilot/copilot-business#list-all-copilot-business-seat-assignments-for-an-organization +// GitHub API docs: https://docs.github.com/rest/copilot/copilot-user-management#list-all-copilot-seat-assignments-for-an-organization // //meta:operation GET /orgs/{org}/copilot/billing/seats func (s *CopilotService) ListCopilotSeats(ctx context.Context, org string, opts *ListOptions) (*ListCopilotSeatsResponse, *Response, error) { u := fmt.Sprintf("orgs/%v/copilot/billing/seats", org) + u, err := addOptions(u, opts) + if err != nil { + return nil, nil, err + } - req, err := s.client.NewRequest("GET", u, opts) + req, err := s.client.NewRequest("GET", u, nil) if err != nil { return nil, nil, err } @@ -178,7 +182,7 @@ func (s *CopilotService) ListCopilotSeats(ctx context.Context, org string, opts // AddCopilotTeams adds teams to the Copilot for Business subscription for an organization. // -// GitHub API docs: https://docs.github.com/rest/copilot/copilot-business#add-teams-to-the-copilot-business-subscription-for-an-organization +// GitHub API docs: https://docs.github.com/rest/copilot/copilot-user-management#add-teams-to-the-copilot-subscription-for-an-organization // //meta:operation POST /orgs/{org}/copilot/billing/selected_teams func (s *CopilotService) AddCopilotTeams(ctx context.Context, org string, teamNames []string) (*SeatAssignments, *Response, error) { @@ -206,7 +210,7 @@ func (s *CopilotService) AddCopilotTeams(ctx context.Context, org string, teamNa // RemoveCopilotTeams removes teams from the Copilot for Business subscription for an organization. 
// -// GitHub API docs: https://docs.github.com/rest/copilot/copilot-business#remove-teams-from-the-copilot-business-subscription-for-an-organization +// GitHub API docs: https://docs.github.com/rest/copilot/copilot-user-management#remove-teams-from-the-copilot-subscription-for-an-organization // //meta:operation DELETE /orgs/{org}/copilot/billing/selected_teams func (s *CopilotService) RemoveCopilotTeams(ctx context.Context, org string, teamNames []string) (*SeatCancellations, *Response, error) { @@ -234,7 +238,7 @@ func (s *CopilotService) RemoveCopilotTeams(ctx context.Context, org string, tea // AddCopilotUsers adds users to the Copilot for Business subscription for an organization // -// GitHub API docs: https://docs.github.com/rest/copilot/copilot-business#add-users-to-the-copilot-business-subscription-for-an-organization +// GitHub API docs: https://docs.github.com/rest/copilot/copilot-user-management#add-users-to-the-copilot-subscription-for-an-organization // //meta:operation POST /orgs/{org}/copilot/billing/selected_users func (s *CopilotService) AddCopilotUsers(ctx context.Context, org string, users []string) (*SeatAssignments, *Response, error) { @@ -262,7 +266,7 @@ func (s *CopilotService) AddCopilotUsers(ctx context.Context, org string, users // RemoveCopilotUsers removes users from the Copilot for Business subscription for an organization. // -// GitHub API docs: https://docs.github.com/rest/copilot/copilot-business#remove-users-from-the-copilot-business-subscription-for-an-organization +// GitHub API docs: https://docs.github.com/rest/copilot/copilot-user-management#remove-users-from-the-copilot-subscription-for-an-organization // //meta:operation DELETE /orgs/{org}/copilot/billing/selected_users func (s *CopilotService) RemoveCopilotUsers(ctx context.Context, org string, users []string) (*SeatCancellations, *Response, error) { @@ -290,7 +294,7 @@ func (s *CopilotService) RemoveCopilotUsers(ctx context.Context, org string, use // GetSeatDetails gets Copilot for Business seat assignment details for a user. 
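The ListCopilotSeats hunk above fixes pagination by encoding ListOptions as query parameters (via addOptions) instead of passing them as a request body. A paging sketch under the assumption that the org name is a placeholder and that the v62 ListCopilotSeatsResponse exposes a Seats slice:

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/google/go-github/v62/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil).WithAuthToken(os.Getenv("GITHUB_TOKEN"))

	opts := &github.ListOptions{PerPage: 50}
	total := 0
	for {
		seats, resp, err := client.Copilot.ListCopilotSeats(ctx, "example-org", opts)
		if err != nil {
			log.Fatal(err)
		}
		total += len(seats.Seats)
		if resp.NextPage == 0 {
			break
		}
		opts.Page = resp.NextPage
	}
	fmt.Println("seat assignments:", total)
}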
// -// GitHub API docs: https://docs.github.com/rest/copilot/copilot-business#get-copilot-business-seat-assignment-details-for-a-user +// GitHub API docs: https://docs.github.com/rest/copilot/copilot-user-management#get-copilot-seat-assignment-details-for-a-user // //meta:operation GET /orgs/{org}/members/{username}/copilot func (s *CopilotService) GetSeatDetails(ctx context.Context, org, user string) (*CopilotSeatDetails, *Response, error) { diff --git a/vendor/github.com/google/go-github/v60/github/dependabot.go b/vendor/github.com/google/go-github/v62/github/dependabot.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/dependabot.go rename to vendor/github.com/google/go-github/v62/github/dependabot.go diff --git a/vendor/github.com/google/go-github/v60/github/dependabot_alerts.go b/vendor/github.com/google/go-github/v62/github/dependabot_alerts.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/dependabot_alerts.go rename to vendor/github.com/google/go-github/v62/github/dependabot_alerts.go diff --git a/vendor/github.com/google/go-github/v60/github/dependabot_secrets.go b/vendor/github.com/google/go-github/v62/github/dependabot_secrets.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/dependabot_secrets.go rename to vendor/github.com/google/go-github/v62/github/dependabot_secrets.go diff --git a/vendor/github.com/google/go-github/v60/github/dependency_graph.go b/vendor/github.com/google/go-github/v62/github/dependency_graph.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/dependency_graph.go rename to vendor/github.com/google/go-github/v62/github/dependency_graph.go diff --git a/vendor/github.com/google/go-github/v62/github/dependency_graph_snapshots.go b/vendor/github.com/google/go-github/v62/github/dependency_graph_snapshots.go new file mode 100644 index 00000000000..0606b981510 --- /dev/null +++ b/vendor/github.com/google/go-github/v62/github/dependency_graph_snapshots.go @@ -0,0 +1,113 @@ +// Copyright 2023 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "fmt" +) + +// DependencyGraphSnapshotResolvedDependency represents a resolved dependency in a dependency graph snapshot. +// +// GitHub API docs: https://docs.github.com/rest/dependency-graph/dependency-submission#create-a-snapshot-of-dependencies-for-a-repository +type DependencyGraphSnapshotResolvedDependency struct { + PackageURL *string `json:"package_url,omitempty"` + // Represents whether the dependency is requested directly by the manifest or is a dependency of another dependency. + // Can have the following values: + // - "direct": indicates that the dependency is requested directly by the manifest. + // - "indirect": indicates that the dependency is a dependency of another dependency. + Relationship *string `json:"relationship,omitempty"` + // Represents whether the dependency is required for the primary build artifact or is only used for development. + // Can have the following values: + // - "runtime": indicates that the dependency is required for the primary build artifact. + // - "development": indicates that the dependency is only used for development. + Scope *string `json:"scope,omitempty"` + Dependencies []string `json:"dependencies,omitempty"` +} + +// DependencyGraphSnapshotJob represents the job that created the snapshot. 
+// +// GitHub API docs: https://docs.github.com/rest/dependency-graph/dependency-submission#create-a-snapshot-of-dependencies-for-a-repository +type DependencyGraphSnapshotJob struct { + Correlator *string `json:"correlator,omitempty"` + ID *string `json:"id,omitempty"` + HTMLURL *string `json:"html_url,omitempty"` +} + +// DependencyGraphSnapshotDetector represents a description of the detector used. +// +// GitHub API docs: https://docs.github.com/rest/dependency-graph/dependency-submission#create-a-snapshot-of-dependencies-for-a-repository +type DependencyGraphSnapshotDetector struct { + Name *string `json:"name,omitempty"` + Version *string `json:"version,omitempty"` + URL *string `json:"url,omitempty"` +} + +// DependencyGraphSnapshotManifestFile represents the file declaring the repository's dependencies. +// +// GitHub API docs: https://docs.github.com/rest/dependency-graph/dependency-submission#create-a-snapshot-of-dependencies-for-a-repository +type DependencyGraphSnapshotManifestFile struct { + SourceLocation *string `json:"source_location,omitempty"` +} + +// DependencyGraphSnapshotManifest represents a collection of related dependencies declared in a file or representing a logical group of dependencies. +// +// GitHub API docs: https://docs.github.com/rest/dependency-graph/dependency-submission#create-a-snapshot-of-dependencies-for-a-repository +type DependencyGraphSnapshotManifest struct { + Name *string `json:"name,omitempty"` + File *DependencyGraphSnapshotManifestFile `json:"file,omitempty"` + Resolved map[string]*DependencyGraphSnapshotResolvedDependency `json:"resolved,omitempty"` +} + +// DependencyGraphSnapshot represent a snapshot of a repository's dependencies. +// +// GitHub API docs: https://docs.github.com/rest/dependency-graph/dependency-submission#create-a-snapshot-of-dependencies-for-a-repository +type DependencyGraphSnapshot struct { + Version int `json:"version"` + Sha *string `json:"sha,omitempty"` + Ref *string `json:"ref,omitempty"` + Job *DependencyGraphSnapshotJob `json:"job,omitempty"` + Detector *DependencyGraphSnapshotDetector `json:"detector,omitempty"` + Scanned *Timestamp `json:"scanned,omitempty"` + Manifests map[string]*DependencyGraphSnapshotManifest `json:"manifests,omitempty"` +} + +// DependencyGraphSnapshotCreationData represents the dependency snapshot's creation result. +// +// GitHub API docs: https://docs.github.com/rest/dependency-graph/dependency-submission#create-a-snapshot-of-dependencies-for-a-repository +type DependencyGraphSnapshotCreationData struct { + ID int64 `json:"id"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + Message *string `json:"message,omitempty"` + // Represents the snapshot creation result. + // Can have the following values: + // - "SUCCESS": indicates that the snapshot was successfully created and the repository's dependencies were updated. + // - "ACCEPTED": indicates that the snapshot was successfully created, but the repository's dependencies were not updated. + // - "INVALID": indicates that the snapshot was malformed. + Result *string `json:"result,omitempty"` +} + +// CreateSnapshot creates a new snapshot of a repository's dependencies. 
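The snapshot types above, together with the CreateSnapshot method whose definition continues below, are enough to sketch a dependency submission. A minimal example; the owner, repo, SHA, and manifest contents are placeholders, not values from this patch:

package main

import (
	"context"
	"fmt"
	"log"
	"os"
	"time"

	"github.com/google/go-github/v62/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil).WithAuthToken(os.Getenv("GITHUB_TOKEN"))

	snapshot := &github.DependencyGraphSnapshot{
		Version: 0,
		Sha:     github.String("0000000000000000000000000000000000000000"), // placeholder commit SHA
		Ref:     github.String("refs/heads/main"),
		Job: &github.DependencyGraphSnapshotJob{
			Correlator: github.String("example-workflow/example-job"),
			ID:         github.String("1"),
		},
		Detector: &github.DependencyGraphSnapshotDetector{
			Name:    github.String("example-detector"),
			Version: github.String("0.0.1"),
			URL:     github.String("https://example.com/detector"),
		},
		Scanned: &github.Timestamp{Time: time.Now()},
		Manifests: map[string]*github.DependencyGraphSnapshotManifest{
			"go.mod": {
				Name: github.String("go.mod"),
				File: &github.DependencyGraphSnapshotManifestFile{SourceLocation: github.String("go.mod")},
				Resolved: map[string]*github.DependencyGraphSnapshotResolvedDependency{
					"github.com/google/go-github/v62": {
						PackageURL:   github.String("pkg:golang/github.com/google/go-github/v62@v62.0.0"),
						Relationship: github.String("direct"),
						Scope:        github.String("runtime"),
					},
				},
			},
		},
	}

	result, _, err := client.DependencyGraph.CreateSnapshot(ctx, "example-owner", "example-repo", snapshot)
	if err != nil {
		log.Fatal(err)
	}
	// "SUCCESS", "ACCEPTED", or "INVALID" per the creation-data type above.
	fmt.Println(result.GetResult())
}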
+// +// GitHub API docs: https://docs.github.com/rest/dependency-graph/dependency-submission#create-a-snapshot-of-dependencies-for-a-repository +// +//meta:operation POST /repos/{owner}/{repo}/dependency-graph/snapshots +func (s *DependencyGraphService) CreateSnapshot(ctx context.Context, owner, repo string, dependencyGraphSnapshot *DependencyGraphSnapshot) (*DependencyGraphSnapshotCreationData, *Response, error) { + url := fmt.Sprintf("repos/%v/%v/dependency-graph/snapshots", owner, repo) + + req, err := s.client.NewRequest("POST", url, dependencyGraphSnapshot) + if err != nil { + return nil, nil, err + } + + var snapshotCreationData *DependencyGraphSnapshotCreationData + resp, err := s.client.Do(ctx, req, &snapshotCreationData) + if err != nil { + return nil, resp, err + } + + return snapshotCreationData, resp, nil +} diff --git a/vendor/github.com/google/go-github/v60/github/doc.go b/vendor/github.com/google/go-github/v62/github/doc.go similarity index 99% rename from vendor/github.com/google/go-github/v60/github/doc.go rename to vendor/github.com/google/go-github/v62/github/doc.go index 1896244d7f0..e0c810871f2 100644 --- a/vendor/github.com/google/go-github/v60/github/doc.go +++ b/vendor/github.com/google/go-github/v62/github/doc.go @@ -8,7 +8,7 @@ Package github provides a client for using the GitHub API. Usage: - import "github.com/google/go-github/v60/github" // with go modules enabled (GO111MODULE=on or outside GOPATH) + import "github.com/google/go-github/v62/github" // with go modules enabled (GO111MODULE=on or outside GOPATH) import "github.com/google/go-github/github" // with go modules disabled Construct a new GitHub client, then use the various services on the client to diff --git a/vendor/github.com/google/go-github/v60/github/emojis.go b/vendor/github.com/google/go-github/v62/github/emojis.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/emojis.go rename to vendor/github.com/google/go-github/v62/github/emojis.go diff --git a/vendor/github.com/google/go-github/v60/github/enterprise.go b/vendor/github.com/google/go-github/v62/github/enterprise.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/enterprise.go rename to vendor/github.com/google/go-github/v62/github/enterprise.go diff --git a/vendor/github.com/google/go-github/v60/github/enterprise_actions_runner_groups.go b/vendor/github.com/google/go-github/v62/github/enterprise_actions_runner_groups.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/enterprise_actions_runner_groups.go rename to vendor/github.com/google/go-github/v62/github/enterprise_actions_runner_groups.go diff --git a/vendor/github.com/google/go-github/v60/github/enterprise_actions_runners.go b/vendor/github.com/google/go-github/v62/github/enterprise_actions_runners.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/enterprise_actions_runners.go rename to vendor/github.com/google/go-github/v62/github/enterprise_actions_runners.go diff --git a/vendor/github.com/google/go-github/v60/github/enterprise_audit_log.go b/vendor/github.com/google/go-github/v62/github/enterprise_audit_log.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/enterprise_audit_log.go rename to vendor/github.com/google/go-github/v62/github/enterprise_audit_log.go diff --git a/vendor/github.com/google/go-github/v60/github/enterprise_code_security_and_analysis.go 
b/vendor/github.com/google/go-github/v62/github/enterprise_code_security_and_analysis.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/enterprise_code_security_and_analysis.go rename to vendor/github.com/google/go-github/v62/github/enterprise_code_security_and_analysis.go diff --git a/vendor/github.com/google/go-github/v60/github/event.go b/vendor/github.com/google/go-github/v62/github/event.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/event.go rename to vendor/github.com/google/go-github/v62/github/event.go diff --git a/vendor/github.com/google/go-github/v60/github/event_types.go b/vendor/github.com/google/go-github/v62/github/event_types.go similarity index 98% rename from vendor/github.com/google/go-github/v60/github/event_types.go rename to vendor/github.com/google/go-github/v62/github/event_types.go index b820dddf4a7..e5ae33a5fa0 100644 --- a/vendor/github.com/google/go-github/v60/github/event_types.go +++ b/vendor/github.com/google/go-github/v62/github/event_types.go @@ -232,6 +232,7 @@ type DeploymentProtectionRuleEvent struct { // // GitHub API docs: https://docs.github.com/developers/webhooks-and-events/webhook-events-and-payloads#deployment_status type DeploymentStatusEvent struct { + Action *string `json:"action,omitempty"` Deployment *Deployment `json:"deployment,omitempty"` DeploymentStatus *DeploymentStatus `json:"deployment_status,omitempty"` Repo *Repository `json:"repository,omitempty"` @@ -689,14 +690,34 @@ type MarketplacePurchaseEvent struct { Org *Organization `json:"organization,omitempty"` } -// MemberEvent is triggered when a user is added as a collaborator to a repository. +// MemberChangesPermission represents changes to a repository collaborator's permissions. +type MemberChangesPermission struct { + From *string `json:"from,omitempty"` + To *string `json:"to,omitempty"` +} + +// MemberChangesRoleName represents changes to a repository collaborator's role. +type MemberChangesRoleName struct { + From *string `json:"from,omitempty"` + To *string `json:"to,omitempty"` +} + +// MemberChanges represents changes to a repository collaborator's role or permission. +type MemberChanges struct { + Permission *MemberChangesPermission `json:"permission,omitempty"` + RoleName *MemberChangesRoleName `json:"role_name,omitempty"` +} + +// MemberEvent is triggered when a user's membership as a collaborator to a repository changes. // The Webhook event name is "member". // // GitHub API docs: https://docs.github.com/developers/webhooks-and-events/webhook-events-and-payloads#member type MemberEvent struct { - // Action is the action that was performed. Possible value is: "added". - Action *string `json:"action,omitempty"` - Member *User `json:"member,omitempty"` + // Action is the action that was performed. Possible values are: + //"added", "edited", "removed". + Action *string `json:"action,omitempty"` + Member *User `json:"member,omitempty"` + Changes *MemberChanges `json:"changes,omitempty"` // The following fields are only populated by Webhook events. 
Repo *Repository `json:"repository,omitempty"` diff --git a/vendor/github.com/google/go-github/v60/github/gists.go b/vendor/github.com/google/go-github/v62/github/gists.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/gists.go rename to vendor/github.com/google/go-github/v62/github/gists.go diff --git a/vendor/github.com/google/go-github/v60/github/gists_comments.go b/vendor/github.com/google/go-github/v62/github/gists_comments.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/gists_comments.go rename to vendor/github.com/google/go-github/v62/github/gists_comments.go diff --git a/vendor/github.com/google/go-github/v60/github/git.go b/vendor/github.com/google/go-github/v62/github/git.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/git.go rename to vendor/github.com/google/go-github/v62/github/git.go diff --git a/vendor/github.com/google/go-github/v60/github/git_blobs.go b/vendor/github.com/google/go-github/v62/github/git_blobs.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/git_blobs.go rename to vendor/github.com/google/go-github/v62/github/git_blobs.go diff --git a/vendor/github.com/google/go-github/v60/github/git_commits.go b/vendor/github.com/google/go-github/v62/github/git_commits.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/git_commits.go rename to vendor/github.com/google/go-github/v62/github/git_commits.go diff --git a/vendor/github.com/google/go-github/v60/github/git_refs.go b/vendor/github.com/google/go-github/v62/github/git_refs.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/git_refs.go rename to vendor/github.com/google/go-github/v62/github/git_refs.go diff --git a/vendor/github.com/google/go-github/v60/github/git_tags.go b/vendor/github.com/google/go-github/v62/github/git_tags.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/git_tags.go rename to vendor/github.com/google/go-github/v62/github/git_tags.go diff --git a/vendor/github.com/google/go-github/v60/github/git_trees.go b/vendor/github.com/google/go-github/v62/github/git_trees.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/git_trees.go rename to vendor/github.com/google/go-github/v62/github/git_trees.go diff --git a/vendor/github.com/google/go-github/v60/github/github-accessors.go b/vendor/github.com/google/go-github/v62/github/github-accessors.go similarity index 98% rename from vendor/github.com/google/go-github/v60/github/github-accessors.go rename to vendor/github.com/google/go-github/v62/github/github-accessors.go index 89728d38df9..fd2fe22591d 100644 --- a/vendor/github.com/google/go-github/v60/github/github-accessors.go +++ b/vendor/github.com/google/go-github/v62/github/github-accessors.go @@ -1574,6 +1574,14 @@ func (b *Branch) GetProtected() bool { return *b.Protected } +// GetProtection returns the Protection field. +func (b *Branch) GetProtection() *Protection { + if b == nil { + return nil + } + return b.Protection +} + // GetCommit returns the Commit field. func (b *BranchCommit) GetCommit() *Commit { if b == nil { @@ -5198,6 +5206,166 @@ func (d *Dependency) GetScope() string { return *d.Scope } +// GetDetector returns the Detector field. +func (d *DependencyGraphSnapshot) GetDetector() *DependencyGraphSnapshotDetector { + if d == nil { + return nil + } + return d.Detector +} + +// GetJob returns the Job field. 
+func (d *DependencyGraphSnapshot) GetJob() *DependencyGraphSnapshotJob { + if d == nil { + return nil + } + return d.Job +} + +// GetRef returns the Ref field if it's non-nil, zero value otherwise. +func (d *DependencyGraphSnapshot) GetRef() string { + if d == nil || d.Ref == nil { + return "" + } + return *d.Ref +} + +// GetScanned returns the Scanned field if it's non-nil, zero value otherwise. +func (d *DependencyGraphSnapshot) GetScanned() Timestamp { + if d == nil || d.Scanned == nil { + return Timestamp{} + } + return *d.Scanned +} + +// GetSha returns the Sha field if it's non-nil, zero value otherwise. +func (d *DependencyGraphSnapshot) GetSha() string { + if d == nil || d.Sha == nil { + return "" + } + return *d.Sha +} + +// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. +func (d *DependencyGraphSnapshotCreationData) GetCreatedAt() Timestamp { + if d == nil || d.CreatedAt == nil { + return Timestamp{} + } + return *d.CreatedAt +} + +// GetMessage returns the Message field if it's non-nil, zero value otherwise. +func (d *DependencyGraphSnapshotCreationData) GetMessage() string { + if d == nil || d.Message == nil { + return "" + } + return *d.Message +} + +// GetResult returns the Result field if it's non-nil, zero value otherwise. +func (d *DependencyGraphSnapshotCreationData) GetResult() string { + if d == nil || d.Result == nil { + return "" + } + return *d.Result +} + +// GetName returns the Name field if it's non-nil, zero value otherwise. +func (d *DependencyGraphSnapshotDetector) GetName() string { + if d == nil || d.Name == nil { + return "" + } + return *d.Name +} + +// GetURL returns the URL field if it's non-nil, zero value otherwise. +func (d *DependencyGraphSnapshotDetector) GetURL() string { + if d == nil || d.URL == nil { + return "" + } + return *d.URL +} + +// GetVersion returns the Version field if it's non-nil, zero value otherwise. +func (d *DependencyGraphSnapshotDetector) GetVersion() string { + if d == nil || d.Version == nil { + return "" + } + return *d.Version +} + +// GetCorrelator returns the Correlator field if it's non-nil, zero value otherwise. +func (d *DependencyGraphSnapshotJob) GetCorrelator() string { + if d == nil || d.Correlator == nil { + return "" + } + return *d.Correlator +} + +// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. +func (d *DependencyGraphSnapshotJob) GetHTMLURL() string { + if d == nil || d.HTMLURL == nil { + return "" + } + return *d.HTMLURL +} + +// GetID returns the ID field if it's non-nil, zero value otherwise. +func (d *DependencyGraphSnapshotJob) GetID() string { + if d == nil || d.ID == nil { + return "" + } + return *d.ID +} + +// GetFile returns the File field. +func (d *DependencyGraphSnapshotManifest) GetFile() *DependencyGraphSnapshotManifestFile { + if d == nil { + return nil + } + return d.File +} + +// GetName returns the Name field if it's non-nil, zero value otherwise. +func (d *DependencyGraphSnapshotManifest) GetName() string { + if d == nil || d.Name == nil { + return "" + } + return *d.Name +} + +// GetSourceLocation returns the SourceLocation field if it's non-nil, zero value otherwise. +func (d *DependencyGraphSnapshotManifestFile) GetSourceLocation() string { + if d == nil || d.SourceLocation == nil { + return "" + } + return *d.SourceLocation +} + +// GetPackageURL returns the PackageURL field if it's non-nil, zero value otherwise. 
+func (d *DependencyGraphSnapshotResolvedDependency) GetPackageURL() string { + if d == nil || d.PackageURL == nil { + return "" + } + return *d.PackageURL +} + +// GetRelationship returns the Relationship field if it's non-nil, zero value otherwise. +func (d *DependencyGraphSnapshotResolvedDependency) GetRelationship() string { + if d == nil || d.Relationship == nil { + return "" + } + return *d.Relationship +} + +// GetScope returns the Scope field if it's non-nil, zero value otherwise. +func (d *DependencyGraphSnapshotResolvedDependency) GetScope() string { + if d == nil || d.Scope == nil { + return "" + } + return *d.Scope +} + // GetAction returns the Action field if it's non-nil, zero value otherwise. func (d *DeployKeyEvent) GetAction() string { if d == nil || d.Action == nil { @@ -5710,6 +5878,14 @@ func (d *DeploymentStatus) GetURL() string { return *d.URL } +// GetAction returns the Action field if it's non-nil, zero value otherwise. +func (d *DeploymentStatusEvent) GetAction() string { + if d == nil || d.Action == nil { + return "" + } + return *d.Action +} + // GetDeployment returns the Deployment field. func (d *DeploymentStatusEvent) GetDeployment() *Deployment { if d == nil { @@ -8678,6 +8854,14 @@ func (i *InstallationPermissions) GetOrganizationAdministration() string { return *i.OrganizationAdministration } +// GetOrganizationCustomProperties returns the OrganizationCustomProperties field if it's non-nil, zero value otherwise. +func (i *InstallationPermissions) GetOrganizationCustomProperties() string { + if i == nil || i.OrganizationCustomProperties == nil { + return "" + } + return *i.OrganizationCustomProperties +} + // GetOrganizationCustomRoles returns the OrganizationCustomRoles field if it's non-nil, zero value otherwise. func (i *InstallationPermissions) GetOrganizationCustomRoles() string { if i == nil || i.OrganizationCustomRoles == nil { @@ -9046,6 +9230,14 @@ func (i *InstallationToken) GetToken() string { return *i.Token } +// GetPermissions returns the Permissions field. +func (i *InstallationTokenListRepoOptions) GetPermissions() *InstallationPermissions { + if i == nil { + return nil + } + return i.Permissions +} + // GetPermissions returns the Permissions field. func (i *InstallationTokenOptions) GetPermissions() *InstallationPermissions { if i == nil { @@ -10742,6 +10934,14 @@ func (l *ListRepositories) GetTotalCount() int { return *l.TotalCount } +// GetName returns the Name field if it's non-nil, zero value otherwise. +func (l *ListRunnersOptions) GetName() string { + if l == nil || l.Name == nil { + return "" + } + return *l.Name +} + // GetCount returns the Count field if it's non-nil, zero value otherwise. func (l *ListSCIMProvisionedIdentitiesOptions) GetCount() int { if l == nil || l.Count == nil { @@ -11190,6 +11390,54 @@ func (m *Match) GetText() string { return *m.Text } +// GetPermission returns the Permission field. +func (m *MemberChanges) GetPermission() *MemberChangesPermission { + if m == nil { + return nil + } + return m.Permission +} + +// GetRoleName returns the RoleName field. +func (m *MemberChanges) GetRoleName() *MemberChangesRoleName { + if m == nil { + return nil + } + return m.RoleName +} + +// GetFrom returns the From field if it's non-nil, zero value otherwise. +func (m *MemberChangesPermission) GetFrom() string { + if m == nil || m.From == nil { + return "" + } + return *m.From +} + +// GetTo returns the To field if it's non-nil, zero value otherwise. 
+func (m *MemberChangesPermission) GetTo() string { + if m == nil || m.To == nil { + return "" + } + return *m.To +} + +// GetFrom returns the From field if it's non-nil, zero value otherwise. +func (m *MemberChangesRoleName) GetFrom() string { + if m == nil || m.From == nil { + return "" + } + return *m.From +} + +// GetTo returns the To field if it's non-nil, zero value otherwise. +func (m *MemberChangesRoleName) GetTo() string { + if m == nil || m.To == nil { + return "" + } + return *m.To +} + // GetAction returns the Action field if it's non-nil, zero value otherwise. func (m *MemberEvent) GetAction() string { if m == nil || m.Action == nil { @@ -11198,6 +11446,14 @@ func (m *MemberEvent) GetAction() string { return *m.Action } +// GetChanges returns the Changes field. +func (m *MemberEvent) GetChanges() *MemberChanges { + if m == nil { + return nil + } + return m.Changes +} + // GetInstallation returns the Installation field. func (m *MemberEvent) GetInstallation() *Installation { if m == nil { @@ -11966,6 +12222,14 @@ func (n *NewTeam) GetLDAPDN() string { return *n.LDAPDN } +// GetNotificationSetting returns the NotificationSetting field if it's non-nil, zero value otherwise. +func (n *NewTeam) GetNotificationSetting() string { + if n == nil || n.NotificationSetting == nil { + return "" + } + return *n.NotificationSetting +} + // GetParentTeamID returns the ParentTeamID field if it's non-nil, zero value otherwise. func (n *NewTeam) GetParentTeamID() int64 { if n == nil || n.ParentTeamID == nil { @@ -17534,6 +17798,14 @@ func (r *RateLimits) GetActionsRunnerRegistration() *Rate { return r.ActionsRunnerRegistration } +// GetAuditLog returns the AuditLog field. +func (r *RateLimits) GetAuditLog() *Rate { + if r == nil { + return nil + } + return r.AuditLog +} + // GetCodeScanningUpload returns the CodeScanningUpload field. func (r *RateLimits) GetCodeScanningUpload() *Rate { if r == nil { diff --git a/vendor/github.com/google/go-github/v60/github/github.go b/vendor/github.com/google/go-github/v62/github/github.go similarity index 95% rename from vendor/github.com/google/go-github/v60/github/github.go rename to vendor/github.com/google/go-github/v62/github/github.go index 05c3acd55b7..28a3c10bb28 100644 --- a/vendor/github.com/google/go-github/v60/github/github.go +++ b/vendor/github.com/google/go-github/v62/github/github.go @@ -28,7 +28,7 @@ import ( ) const ( - Version = "v60.0.0" + Version = "v62.0.0" defaultAPIVersion = "2022-11-28" defaultBaseURL = "https://api.github.com/" @@ -170,7 +170,7 @@ type Client struct { UserAgent string rateMu sync.Mutex - rateLimits [categories]Rate // Rate limits for the client as determined by the most recent API calls. + rateLimits [Categories]Rate // Rate limits for the client as determined by the most recent API calls. secondaryRateLimitReset time.Time // Secondary rate limit reset for the client as determined by the most recent API calls. common service // Reuse a single struct instead of allocating one for each service on the heap. @@ -804,6 +804,7 @@ type requestContext uint8 const ( bypassRateLimitCheck requestContext = iota + SleepUntilPrimaryRateLimitResetWhenRateLimited ) // BareDo sends an API request and lets you handle the api response. 
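The event_types.go changes and the accessors above extend the member webhook event with "edited" and "removed" actions plus a Changes payload describing permission/role transitions. A sketch of a webhook handler reading those fields; the secret, route, and port are placeholders:

package main

import (
	"fmt"
	"log"
	"net/http"
	"os"

	"github.com/google/go-github/v62/github"
)

func handleWebhook(w http.ResponseWriter, r *http.Request) {
	payload, err := github.ValidatePayload(r, []byte(os.Getenv("WEBHOOK_SECRET")))
	if err != nil {
		http.Error(w, "invalid signature", http.StatusBadRequest)
		return
	}
	event, err := github.ParseWebHook(github.WebHookType(r), payload)
	if err != nil {
		http.Error(w, "unparseable payload", http.StatusBadRequest)
		return
	}
	if e, ok := event.(*github.MemberEvent); ok {
		// Action can now be "added", "edited", or "removed"; on "edited",
		// Changes carries the old/new permission or role name.
		if e.GetAction() == "edited" {
			perm := e.GetChanges().GetPermission()
			fmt.Printf("%s: %s -> %s\n", e.GetMember().GetLogin(), perm.GetFrom(), perm.GetTo())
		}
	}
	w.WriteHeader(http.StatusOK)
}

func main() {
	http.HandleFunc("/webhook", handleWebhook)
	log.Fatal(http.ListenAndServe(":8080", nil))
}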
If an error @@ -821,7 +822,7 @@ func (c *Client) BareDo(ctx context.Context, req *http.Request) (*Response, erro req = withContext(ctx, req) - rateLimitCategory := category(req.Method, req.URL.Path) + rateLimitCategory := GetRateLimitCategory(req.Method, req.URL.Path) if bypass := ctx.Value(bypassRateLimitCheck); bypass == nil { // If we've hit rate limit, don't make further requests before Reset time. @@ -889,6 +890,15 @@ func (c *Client) BareDo(ctx context.Context, req *http.Request) (*Response, erro err = aerr } + rateLimitError, ok := err.(*RateLimitError) + if ok && req.Context().Value(SleepUntilPrimaryRateLimitResetWhenRateLimited) != nil { + if err := sleepUntilResetWithBuffer(req.Context(), rateLimitError.Rate.Reset.Time); err != nil { + return response, err + } + // retry the request once when the rate limit has reset + return c.BareDo(context.WithValue(req.Context(), SleepUntilPrimaryRateLimitResetWhenRateLimited, nil), req) + } + // Update the secondary rate limit if we hit it. rerr, ok := err.(*AbuseRateLimitError) if ok && rerr.RetryAfter != nil { @@ -937,7 +947,7 @@ func (c *Client) Do(ctx context.Context, req *http.Request, v interface{}) (*Res // current client state in order to quickly check if *RateLimitError can be immediately returned // from Client.Do, and if so, returns it so that Client.Do can skip making a network API call unnecessarily. // Otherwise it returns nil, and Client.Do should proceed normally. -func (c *Client) checkRateLimitBeforeDo(req *http.Request, rateLimitCategory rateLimitCategory) *RateLimitError { +func (c *Client) checkRateLimitBeforeDo(req *http.Request, rateLimitCategory RateLimitCategory) *RateLimitError { c.rateMu.Lock() rate := c.rateLimits[rateLimitCategory] c.rateMu.Unlock() @@ -950,6 +960,18 @@ func (c *Client) checkRateLimitBeforeDo(req *http.Request, rateLimitCategory rat Header: make(http.Header), Body: io.NopCloser(strings.NewReader("")), } + + if req.Context().Value(SleepUntilPrimaryRateLimitResetWhenRateLimited) != nil { + if err := sleepUntilResetWithBuffer(req.Context(), rate.Reset.Time); err == nil { + return nil + } + return &RateLimitError{ + Rate: rate, + Response: resp, + Message: fmt.Sprintf("Context cancelled while waiting for rate limit to reset until %v, not making remote request.", rate.Reset.Time), + } + } + return &RateLimitError{ Rate: rate, Response: resp, @@ -1303,65 +1325,70 @@ func parseBoolResponse(err error) (bool, error) { return false, err } -type rateLimitCategory uint8 +type RateLimitCategory uint8 const ( - coreCategory rateLimitCategory = iota - searchCategory - graphqlCategory - integrationManifestCategory - sourceImportCategory - codeScanningUploadCategory - actionsRunnerRegistrationCategory - scimCategory - dependencySnapshotsCategory - codeSearchCategory - - categories // An array of this length will be able to contain all rate limit categories. + CoreCategory RateLimitCategory = iota + SearchCategory + GraphqlCategory + IntegrationManifestCategory + SourceImportCategory + CodeScanningUploadCategory + ActionsRunnerRegistrationCategory + ScimCategory + DependencySnapshotsCategory + CodeSearchCategory + AuditLogCategory + + Categories // An array of this length will be able to contain all rate limit categories. ) -// category returns the rate limit category of the endpoint, determined by HTTP method and Request.URL.Path. 
-func category(method, path string) rateLimitCategory { +// GetRateLimitCategory returns the rate limit RateLimitCategory of the endpoint, determined by HTTP method and Request.URL.Path. +func GetRateLimitCategory(method, path string) RateLimitCategory { switch { // https://docs.github.com/rest/rate-limit#about-rate-limits default: // NOTE: coreCategory is returned for actionsRunnerRegistrationCategory too, // because no API found for this category. - return coreCategory + return CoreCategory // https://docs.github.com/en/rest/search/search#search-code case strings.HasPrefix(path, "/search/code") && method == http.MethodGet: - return codeSearchCategory + return CodeSearchCategory case strings.HasPrefix(path, "/search/"): - return searchCategory + return SearchCategory case path == "/graphql": - return graphqlCategory + return GraphqlCategory case strings.HasPrefix(path, "/app-manifests/") && strings.HasSuffix(path, "/conversions") && method == http.MethodPost: - return integrationManifestCategory + return IntegrationManifestCategory // https://docs.github.com/rest/migrations/source-imports#start-an-import case strings.HasPrefix(path, "/repos/") && strings.HasSuffix(path, "/import") && method == http.MethodPut: - return sourceImportCategory + return SourceImportCategory // https://docs.github.com/rest/code-scanning#upload-an-analysis-as-sarif-data case strings.HasSuffix(path, "/code-scanning/sarifs"): - return codeScanningUploadCategory + return CodeScanningUploadCategory // https://docs.github.com/enterprise-cloud@latest/rest/scim case strings.HasPrefix(path, "/scim/"): - return scimCategory + return ScimCategory // https://docs.github.com/en/rest/dependency-graph/dependency-submission#create-a-snapshot-of-dependencies-for-a-repository case strings.HasPrefix(path, "/repos/") && strings.HasSuffix(path, "/dependency-graph/snapshots") && method == http.MethodPost: - return dependencySnapshotsCategory + return DependencySnapshotsCategory + + // https://docs.github.com/en/enterprise-cloud@latest/rest/orgs/orgs?apiVersion=2022-11-28#get-the-audit-log-for-an-organization + case strings.HasSuffix(path, "/audit-log"): + return AuditLogCategory } } @@ -1509,6 +1536,20 @@ func formatRateReset(d time.Duration) string { return fmt.Sprintf("[rate reset in %v]", timeString) } +func sleepUntilResetWithBuffer(ctx context.Context, reset time.Time) error { + buffer := time.Second + timer := time.NewTimer(time.Until(reset) + buffer) + select { + case <-ctx.Done(): + if !timer.Stop() { + <-timer.C + } + return ctx.Err() + case <-timer.C: + } + return nil +} + // When using roundTripWithOptionalFollowRedirect, note that it // is the responsibility of the caller to close the response body. 
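The github.go hunk above exports the rate-limit categories (GetRateLimitCategory, CoreCategory, AuditLogCategory, and the rest) and adds the SleepUntilPrimaryRateLimitResetWhenRateLimited context key, which makes the client sleep until the primary rate limit resets and retry once instead of returning a RateLimitError. A sketch of both; the search query and org name are placeholders:

package main

import (
	"context"
	"fmt"
	"log"
	"net/http"
	"os"

	"github.com/google/go-github/v62/github"
)

func main() {
	// Exported in v62: inspect which rate-limit bucket a request falls into.
	fmt.Println(github.GetRateLimitCategory(http.MethodGet, "/search/code") == github.CodeSearchCategory)         // true
	fmt.Println(github.GetRateLimitCategory(http.MethodGet, "/orgs/example-org/audit-log") == github.AuditLogCategory) // true

	client := github.NewClient(nil).WithAuthToken(os.Getenv("GITHUB_TOKEN"))

	// With this context value set, hitting the primary rate limit makes the
	// client wait until the reset time (plus a small buffer) and retry once,
	// rather than returning *github.RateLimitError immediately.
	ctx := context.WithValue(context.Background(), github.SleepUntilPrimaryRateLimitResetWhenRateLimited, true)

	results, _, err := client.Search.Code(ctx, "ScaledObject in:file language:go", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("matches:", results.GetTotal())
}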
func (c *Client) roundTripWithOptionalFollowRedirect(ctx context.Context, u string, maxRedirects int, opts ...RequestOption) (*http.Response, error) { diff --git a/vendor/github.com/google/go-github/v60/github/gitignore.go b/vendor/github.com/google/go-github/v62/github/gitignore.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/gitignore.go rename to vendor/github.com/google/go-github/v62/github/gitignore.go diff --git a/vendor/github.com/google/go-github/v60/github/interactions.go b/vendor/github.com/google/go-github/v62/github/interactions.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/interactions.go rename to vendor/github.com/google/go-github/v62/github/interactions.go diff --git a/vendor/github.com/google/go-github/v60/github/interactions_orgs.go b/vendor/github.com/google/go-github/v62/github/interactions_orgs.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/interactions_orgs.go rename to vendor/github.com/google/go-github/v62/github/interactions_orgs.go diff --git a/vendor/github.com/google/go-github/v60/github/interactions_repos.go b/vendor/github.com/google/go-github/v62/github/interactions_repos.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/interactions_repos.go rename to vendor/github.com/google/go-github/v62/github/interactions_repos.go diff --git a/vendor/github.com/google/go-github/v60/github/issue_import.go b/vendor/github.com/google/go-github/v62/github/issue_import.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/issue_import.go rename to vendor/github.com/google/go-github/v62/github/issue_import.go diff --git a/vendor/github.com/google/go-github/v60/github/issues.go b/vendor/github.com/google/go-github/v62/github/issues.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/issues.go rename to vendor/github.com/google/go-github/v62/github/issues.go diff --git a/vendor/github.com/google/go-github/v60/github/issues_assignees.go b/vendor/github.com/google/go-github/v62/github/issues_assignees.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/issues_assignees.go rename to vendor/github.com/google/go-github/v62/github/issues_assignees.go diff --git a/vendor/github.com/google/go-github/v60/github/issues_comments.go b/vendor/github.com/google/go-github/v62/github/issues_comments.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/issues_comments.go rename to vendor/github.com/google/go-github/v62/github/issues_comments.go diff --git a/vendor/github.com/google/go-github/v60/github/issues_events.go b/vendor/github.com/google/go-github/v62/github/issues_events.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/issues_events.go rename to vendor/github.com/google/go-github/v62/github/issues_events.go diff --git a/vendor/github.com/google/go-github/v60/github/issues_labels.go b/vendor/github.com/google/go-github/v62/github/issues_labels.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/issues_labels.go rename to vendor/github.com/google/go-github/v62/github/issues_labels.go diff --git a/vendor/github.com/google/go-github/v60/github/issues_milestones.go b/vendor/github.com/google/go-github/v62/github/issues_milestones.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/issues_milestones.go rename to 
vendor/github.com/google/go-github/v62/github/issues_milestones.go diff --git a/vendor/github.com/google/go-github/v60/github/issues_timeline.go b/vendor/github.com/google/go-github/v62/github/issues_timeline.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/issues_timeline.go rename to vendor/github.com/google/go-github/v62/github/issues_timeline.go diff --git a/vendor/github.com/google/go-github/v60/github/licenses.go b/vendor/github.com/google/go-github/v62/github/licenses.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/licenses.go rename to vendor/github.com/google/go-github/v62/github/licenses.go diff --git a/vendor/github.com/google/go-github/v60/github/markdown.go b/vendor/github.com/google/go-github/v62/github/markdown.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/markdown.go rename to vendor/github.com/google/go-github/v62/github/markdown.go diff --git a/vendor/github.com/google/go-github/v60/github/messages.go b/vendor/github.com/google/go-github/v62/github/messages.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/messages.go rename to vendor/github.com/google/go-github/v62/github/messages.go diff --git a/vendor/github.com/google/go-github/v60/github/meta.go b/vendor/github.com/google/go-github/v62/github/meta.go similarity index 88% rename from vendor/github.com/google/go-github/v60/github/meta.go rename to vendor/github.com/google/go-github/v62/github/meta.go index 1da8fcf1326..e0e355e8cd0 100644 --- a/vendor/github.com/google/go-github/v60/github/meta.go +++ b/vendor/github.com/google/go-github/v62/github/meta.go @@ -17,11 +17,11 @@ type MetaService service // APIMeta represents metadata about the GitHub API. type APIMeta struct { - // An Array of IP addresses in CIDR format specifying the addresses + // An array of IP addresses in CIDR format specifying the addresses // that incoming service hooks will originate from on GitHub.com. Hooks []string `json:"hooks,omitempty"` - // An Array of IP addresses in CIDR format specifying the Git servers + // An array of IP addresses in CIDR format specifying the Git servers // for GitHub.com. Git []string `json:"git,omitempty"` @@ -40,10 +40,14 @@ type APIMeta struct { // which serve GitHub Pages websites. Pages []string `json:"pages,omitempty"` - // An Array of IP addresses specifying the addresses that source imports + // An array of IP addresses specifying the addresses that source imports // will originate from on GitHub.com. Importer []string `json:"importer,omitempty"` + // An array of IP addresses specifying the addresses that source imports + // will originate from on GitHub Enterprise Cloud. + GithubEnterpriseImporter []string `json:"github_enterprise_importer,omitempty"` + // An array of IP addresses in CIDR format specifying the IP addresses // GitHub Actions will originate from. Actions []string `json:"actions,omitempty"` @@ -65,6 +69,10 @@ type APIMeta struct { // An array of IP addresses in CIDR format specifying the addresses // which serve GitHub APIs. API []string `json:"api,omitempty"` + + // A map of GitHub services and their associated domains. Note that many + // of these domains are represented as wildcards (e.g. "*.github.com"). + Domains map[string][]string `json:"domains,omitempty"` } // Get returns information about GitHub.com, the service. 
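In meta.go, APIMeta gains GithubEnterpriseImporter and a Domains map of GitHub services to their (often wildcarded) domains. A short sketch, not part of this patch, of reading the new fields; it assumes the pre-existing MetaService.Get(ctx) accessor with its (*APIMeta, *Response, error) return, which is only hinted at by the context line above.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v62/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // the meta endpoint does not require authentication

	meta, _, err := client.Meta.Get(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Domains is new in this version; many entries are wildcards such as "*.github.com".
	for service, domains := range meta.Domains {
		fmt.Println(service, domains)
	}
	fmt.Println("GitHub Enterprise Importer ranges:", meta.GithubEnterpriseImporter)
}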
Or, if you access diff --git a/vendor/github.com/google/go-github/v60/github/migrations.go b/vendor/github.com/google/go-github/v62/github/migrations.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/migrations.go rename to vendor/github.com/google/go-github/v62/github/migrations.go diff --git a/vendor/github.com/google/go-github/v60/github/migrations_source_import.go b/vendor/github.com/google/go-github/v62/github/migrations_source_import.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/migrations_source_import.go rename to vendor/github.com/google/go-github/v62/github/migrations_source_import.go diff --git a/vendor/github.com/google/go-github/v60/github/migrations_user.go b/vendor/github.com/google/go-github/v62/github/migrations_user.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/migrations_user.go rename to vendor/github.com/google/go-github/v62/github/migrations_user.go diff --git a/vendor/github.com/google/go-github/v60/github/orgs.go b/vendor/github.com/google/go-github/v62/github/orgs.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/orgs.go rename to vendor/github.com/google/go-github/v62/github/orgs.go diff --git a/vendor/github.com/google/go-github/v60/github/orgs_actions_allowed.go b/vendor/github.com/google/go-github/v62/github/orgs_actions_allowed.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/orgs_actions_allowed.go rename to vendor/github.com/google/go-github/v62/github/orgs_actions_allowed.go diff --git a/vendor/github.com/google/go-github/v60/github/orgs_actions_permissions.go b/vendor/github.com/google/go-github/v62/github/orgs_actions_permissions.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/orgs_actions_permissions.go rename to vendor/github.com/google/go-github/v62/github/orgs_actions_permissions.go diff --git a/vendor/github.com/google/go-github/v60/github/orgs_audit_log.go b/vendor/github.com/google/go-github/v62/github/orgs_audit_log.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/orgs_audit_log.go rename to vendor/github.com/google/go-github/v62/github/orgs_audit_log.go diff --git a/vendor/github.com/google/go-github/v60/github/orgs_credential_authorizations.go b/vendor/github.com/google/go-github/v62/github/orgs_credential_authorizations.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/orgs_credential_authorizations.go rename to vendor/github.com/google/go-github/v62/github/orgs_credential_authorizations.go diff --git a/vendor/github.com/google/go-github/v60/github/orgs_custom_roles.go b/vendor/github.com/google/go-github/v62/github/orgs_custom_roles.go similarity index 72% rename from vendor/github.com/google/go-github/v60/github/orgs_custom_roles.go rename to vendor/github.com/google/go-github/v62/github/orgs_custom_roles.go index 45de896a2f9..e1deeb73ed4 100644 --- a/vendor/github.com/google/go-github/v60/github/orgs_custom_roles.go +++ b/vendor/github.com/google/go-github/v62/github/orgs_custom_roles.go @@ -126,3 +126,57 @@ func (s *OrganizationsService) DeleteCustomRepoRole(ctx context.Context, org, ro return resp, nil } + +// ListTeamsAssignedToOrgRole returns all teams assigned to a specific organization role. +// In order to list teams assigned to an organization role, the authenticated user must be an organization owner. 
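The orgs_custom_roles.go hunk, which continues below, adds two organization-role listings: ListTeamsAssignedToOrgRole and ListUsersAssignedToOrgRole, both paginated via *ListOptions. A hedged usage sketch, not part of this patch; the token, organization name, and role ID are placeholders, and WithAuthToken is assumed from the existing client API.

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/google/go-github/v62/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil).WithAuthToken(os.Getenv("GITHUB_TOKEN"))

	// List the teams assigned to a hypothetical organization role (ID 1234).
	teams, _, err := client.Organizations.ListTeamsAssignedToOrgRole(ctx, "octo-org", 1234, &github.ListOptions{PerPage: 50})
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range teams {
		fmt.Println("team:", t.GetSlug())
	}

	// And the individual users assigned to the same role.
	users, _, err := client.Organizations.ListUsersAssignedToOrgRole(ctx, "octo-org", 1234, nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, u := range users {
		fmt.Println("user:", u.GetLogin())
	}
}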
+// +// GitHub API docs: https://docs.github.com/rest/orgs/organization-roles#list-teams-that-are-assigned-to-an-organization-role +// +//meta:operation GET /orgs/{org}/organization-roles/{role_id}/teams +func (s *OrganizationsService) ListTeamsAssignedToOrgRole(ctx context.Context, org string, roleID int64, opts *ListOptions) ([]*Team, *Response, error) { + u := fmt.Sprintf("orgs/%v/organization-roles/%v/teams", org, roleID) + u, err := addOptions(u, opts) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + var teams []*Team + resp, err := s.client.Do(ctx, req, &teams) + if err != nil { + return nil, resp, err + } + + return teams, resp, nil +} + +// ListUsersAssignedToOrgRole returns all users assigned to a specific organization role. +// In order to list users assigned to an organization role, the authenticated user must be an organization owner. +// +// GitHub API docs: https://docs.github.com/rest/orgs/organization-roles#list-users-that-are-assigned-to-an-organization-role +// +//meta:operation GET /orgs/{org}/organization-roles/{role_id}/users +func (s *OrganizationsService) ListUsersAssignedToOrgRole(ctx context.Context, org string, roleID int64, opts *ListOptions) ([]*User, *Response, error) { + u := fmt.Sprintf("orgs/%v/organization-roles/%v/users", org, roleID) + u, err := addOptions(u, opts) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + var users []*User + resp, err := s.client.Do(ctx, req, &users) + if err != nil { + return nil, resp, err + } + + return users, resp, nil +} diff --git a/vendor/github.com/google/go-github/v60/github/orgs_hooks.go b/vendor/github.com/google/go-github/v62/github/orgs_hooks.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/orgs_hooks.go rename to vendor/github.com/google/go-github/v62/github/orgs_hooks.go diff --git a/vendor/github.com/google/go-github/v60/github/orgs_hooks_configuration.go b/vendor/github.com/google/go-github/v62/github/orgs_hooks_configuration.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/orgs_hooks_configuration.go rename to vendor/github.com/google/go-github/v62/github/orgs_hooks_configuration.go diff --git a/vendor/github.com/google/go-github/v60/github/orgs_hooks_deliveries.go b/vendor/github.com/google/go-github/v62/github/orgs_hooks_deliveries.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/orgs_hooks_deliveries.go rename to vendor/github.com/google/go-github/v62/github/orgs_hooks_deliveries.go diff --git a/vendor/github.com/google/go-github/v60/github/orgs_members.go b/vendor/github.com/google/go-github/v62/github/orgs_members.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/orgs_members.go rename to vendor/github.com/google/go-github/v62/github/orgs_members.go diff --git a/vendor/github.com/google/go-github/v60/github/orgs_outside_collaborators.go b/vendor/github.com/google/go-github/v62/github/orgs_outside_collaborators.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/orgs_outside_collaborators.go rename to vendor/github.com/google/go-github/v62/github/orgs_outside_collaborators.go diff --git a/vendor/github.com/google/go-github/v60/github/orgs_packages.go b/vendor/github.com/google/go-github/v62/github/orgs_packages.go similarity index 100% rename from 
vendor/github.com/google/go-github/v60/github/orgs_packages.go rename to vendor/github.com/google/go-github/v62/github/orgs_packages.go diff --git a/vendor/github.com/google/go-github/v60/github/orgs_personal_access_tokens.go b/vendor/github.com/google/go-github/v62/github/orgs_personal_access_tokens.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/orgs_personal_access_tokens.go rename to vendor/github.com/google/go-github/v62/github/orgs_personal_access_tokens.go diff --git a/vendor/github.com/google/go-github/v60/github/orgs_projects.go b/vendor/github.com/google/go-github/v62/github/orgs_projects.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/orgs_projects.go rename to vendor/github.com/google/go-github/v62/github/orgs_projects.go diff --git a/vendor/github.com/google/go-github/v60/github/orgs_properties.go b/vendor/github.com/google/go-github/v62/github/orgs_properties.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/orgs_properties.go rename to vendor/github.com/google/go-github/v62/github/orgs_properties.go diff --git a/vendor/github.com/google/go-github/v60/github/orgs_rules.go b/vendor/github.com/google/go-github/v62/github/orgs_rules.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/orgs_rules.go rename to vendor/github.com/google/go-github/v62/github/orgs_rules.go diff --git a/vendor/github.com/google/go-github/v60/github/orgs_security_managers.go b/vendor/github.com/google/go-github/v62/github/orgs_security_managers.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/orgs_security_managers.go rename to vendor/github.com/google/go-github/v62/github/orgs_security_managers.go diff --git a/vendor/github.com/google/go-github/v60/github/orgs_users_blocking.go b/vendor/github.com/google/go-github/v62/github/orgs_users_blocking.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/orgs_users_blocking.go rename to vendor/github.com/google/go-github/v62/github/orgs_users_blocking.go diff --git a/vendor/github.com/google/go-github/v60/github/packages.go b/vendor/github.com/google/go-github/v62/github/packages.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/packages.go rename to vendor/github.com/google/go-github/v62/github/packages.go diff --git a/vendor/github.com/google/go-github/v60/github/projects.go b/vendor/github.com/google/go-github/v62/github/projects.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/projects.go rename to vendor/github.com/google/go-github/v62/github/projects.go diff --git a/vendor/github.com/google/go-github/v60/github/pulls.go b/vendor/github.com/google/go-github/v62/github/pulls.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/pulls.go rename to vendor/github.com/google/go-github/v62/github/pulls.go diff --git a/vendor/github.com/google/go-github/v60/github/pulls_comments.go b/vendor/github.com/google/go-github/v62/github/pulls_comments.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/pulls_comments.go rename to vendor/github.com/google/go-github/v62/github/pulls_comments.go diff --git a/vendor/github.com/google/go-github/v60/github/pulls_reviewers.go b/vendor/github.com/google/go-github/v62/github/pulls_reviewers.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/pulls_reviewers.go rename to 
vendor/github.com/google/go-github/v62/github/pulls_reviewers.go diff --git a/vendor/github.com/google/go-github/v60/github/pulls_reviews.go b/vendor/github.com/google/go-github/v62/github/pulls_reviews.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/pulls_reviews.go rename to vendor/github.com/google/go-github/v62/github/pulls_reviews.go diff --git a/vendor/github.com/google/go-github/v60/github/pulls_threads.go b/vendor/github.com/google/go-github/v62/github/pulls_threads.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/pulls_threads.go rename to vendor/github.com/google/go-github/v62/github/pulls_threads.go diff --git a/vendor/github.com/google/go-github/v60/github/rate_limit.go b/vendor/github.com/google/go-github/v62/github/rate_limit.go similarity index 81% rename from vendor/github.com/google/go-github/v60/github/rate_limit.go rename to vendor/github.com/google/go-github/v62/github/rate_limit.go index febe5edccfb..5b01b573d8a 100644 --- a/vendor/github.com/google/go-github/v60/github/rate_limit.go +++ b/vendor/github.com/google/go-github/v62/github/rate_limit.go @@ -54,6 +54,7 @@ type RateLimits struct { SCIM *Rate `json:"scim"` DependencySnapshots *Rate `json:"dependency_snapshots"` CodeSearch *Rate `json:"code_search"` + AuditLog *Rate `json:"audit_log"` } func (r RateLimits) String() string { @@ -85,34 +86,37 @@ func (s *RateLimitService) Get(ctx context.Context) (*RateLimits, *Response, err if response.Resources != nil { s.client.rateMu.Lock() if response.Resources.Core != nil { - s.client.rateLimits[coreCategory] = *response.Resources.Core + s.client.rateLimits[CoreCategory] = *response.Resources.Core } if response.Resources.Search != nil { - s.client.rateLimits[searchCategory] = *response.Resources.Search + s.client.rateLimits[SearchCategory] = *response.Resources.Search } if response.Resources.GraphQL != nil { - s.client.rateLimits[graphqlCategory] = *response.Resources.GraphQL + s.client.rateLimits[GraphqlCategory] = *response.Resources.GraphQL } if response.Resources.IntegrationManifest != nil { - s.client.rateLimits[integrationManifestCategory] = *response.Resources.IntegrationManifest + s.client.rateLimits[IntegrationManifestCategory] = *response.Resources.IntegrationManifest } if response.Resources.SourceImport != nil { - s.client.rateLimits[sourceImportCategory] = *response.Resources.SourceImport + s.client.rateLimits[SourceImportCategory] = *response.Resources.SourceImport } if response.Resources.CodeScanningUpload != nil { - s.client.rateLimits[codeScanningUploadCategory] = *response.Resources.CodeScanningUpload + s.client.rateLimits[CodeScanningUploadCategory] = *response.Resources.CodeScanningUpload } if response.Resources.ActionsRunnerRegistration != nil { - s.client.rateLimits[actionsRunnerRegistrationCategory] = *response.Resources.ActionsRunnerRegistration + s.client.rateLimits[ActionsRunnerRegistrationCategory] = *response.Resources.ActionsRunnerRegistration } if response.Resources.SCIM != nil { - s.client.rateLimits[scimCategory] = *response.Resources.SCIM + s.client.rateLimits[ScimCategory] = *response.Resources.SCIM } if response.Resources.DependencySnapshots != nil { - s.client.rateLimits[dependencySnapshotsCategory] = *response.Resources.DependencySnapshots + s.client.rateLimits[DependencySnapshotsCategory] = *response.Resources.DependencySnapshots } if response.Resources.CodeSearch != nil { - s.client.rateLimits[codeSearchCategory] = *response.Resources.CodeSearch + 
s.client.rateLimits[CodeSearchCategory] = *response.Resources.CodeSearch + } + if response.Resources.AuditLog != nil { + s.client.rateLimits[AuditLogCategory] = *response.Resources.AuditLog } s.client.rateMu.Unlock() } diff --git a/vendor/github.com/google/go-github/v60/github/reactions.go b/vendor/github.com/google/go-github/v62/github/reactions.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/reactions.go rename to vendor/github.com/google/go-github/v62/github/reactions.go diff --git a/vendor/github.com/google/go-github/v60/github/repos.go b/vendor/github.com/google/go-github/v62/github/repos.go similarity index 98% rename from vendor/github.com/google/go-github/v60/github/repos.go rename to vendor/github.com/google/go-github/v62/github/repos.go index 2fb4c6f190a..630ce748c94 100644 --- a/vendor/github.com/google/go-github/v60/github/repos.go +++ b/vendor/github.com/google/go-github/v62/github/repos.go @@ -1003,6 +1003,14 @@ type Branch struct { Name *string `json:"name,omitempty"` Commit *RepositoryCommit `json:"commit,omitempty"` Protected *bool `json:"protected,omitempty"` + + // Protection will always be included in APIs which return the + // 'Branch With Protection' schema such as 'Get a branch', but may + // not be included in APIs that return the `Short Branch` schema + // such as 'List branches'. In such cases, if branch protection is + // enabled, Protected will be `true` but this will be nil, and + // additional protection details can be obtained by calling GetBranch(). + Protection *Protection `json:"protection,omitempty"` } // Protection represents a repository branch's protection. @@ -2394,3 +2402,27 @@ func (s *RepositoriesService) DisablePrivateReporting(ctx context.Context, owner return resp, nil } + +// checkPrivateReporting represents whether private vulnerability reporting is enabled. +type checkPrivateReporting struct { + Enabled bool `json:"enabled,omitempty"` +} + +// IsPrivateReportingEnabled checks if private vulnerability reporting is enabled +// for the repository and returns a boolean indicating the status. 
+// +// GitHub API docs: https://docs.github.com/rest/repos/repos#check-if-private-vulnerability-reporting-is-enabled-for-a-repository +// +//meta:operation GET /repos/{owner}/{repo}/private-vulnerability-reporting +func (s *RepositoriesService) IsPrivateReportingEnabled(ctx context.Context, owner, repo string) (bool, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/private-vulnerability-reporting", owner, repo) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return false, nil, err + } + + privateReporting := new(checkPrivateReporting) + resp, err := s.client.Do(ctx, req, privateReporting) + return privateReporting.Enabled, resp, err +} diff --git a/vendor/github.com/google/go-github/v60/github/repos_actions_access.go b/vendor/github.com/google/go-github/v62/github/repos_actions_access.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/repos_actions_access.go rename to vendor/github.com/google/go-github/v62/github/repos_actions_access.go diff --git a/vendor/github.com/google/go-github/v60/github/repos_actions_allowed.go b/vendor/github.com/google/go-github/v62/github/repos_actions_allowed.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/repos_actions_allowed.go rename to vendor/github.com/google/go-github/v62/github/repos_actions_allowed.go diff --git a/vendor/github.com/google/go-github/v60/github/repos_actions_permissions.go b/vendor/github.com/google/go-github/v62/github/repos_actions_permissions.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/repos_actions_permissions.go rename to vendor/github.com/google/go-github/v62/github/repos_actions_permissions.go diff --git a/vendor/github.com/google/go-github/v60/github/repos_autolinks.go b/vendor/github.com/google/go-github/v62/github/repos_autolinks.go similarity index 99% rename from vendor/github.com/google/go-github/v60/github/repos_autolinks.go rename to vendor/github.com/google/go-github/v62/github/repos_autolinks.go index 200605aa0b2..6c209b2d5e8 100644 --- a/vendor/github.com/google/go-github/v60/github/repos_autolinks.go +++ b/vendor/github.com/google/go-github/v62/github/repos_autolinks.go @@ -28,7 +28,7 @@ type Autolink struct { // ListAutolinks returns a list of autolinks configured for the given repository. // Information about autolinks are only available to repository administrators. 
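IsPrivateReportingEnabled, completed just above, wraps GET /repos/{owner}/{repo}/private-vulnerability-reporting and flattens the "enabled" field of the response into a plain bool. A usage sketch, not part of this patch; the token and repository are placeholders, and WithAuthToken is assumed from the existing client API.

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/google/go-github/v62/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil).WithAuthToken(os.Getenv("GITHUB_TOKEN"))

	enabled, _, err := client.Repositories.IsPrivateReportingEnabled(ctx, "octo-org", "octo-repo")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("private vulnerability reporting enabled:", enabled)
}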
// -// GitHub API docs: https://docs.github.com/rest/repos/autolinks#list-all-autolinks-of-a-repository +// GitHub API docs: https://docs.github.com/rest/repos/autolinks#get-all-autolinks-of-a-repository // //meta:operation GET /repos/{owner}/{repo}/autolinks func (s *RepositoriesService) ListAutolinks(ctx context.Context, owner, repo string, opts *ListOptions) ([]*Autolink, *Response, error) { diff --git a/vendor/github.com/google/go-github/v60/github/repos_codeowners.go b/vendor/github.com/google/go-github/v62/github/repos_codeowners.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/repos_codeowners.go rename to vendor/github.com/google/go-github/v62/github/repos_codeowners.go diff --git a/vendor/github.com/google/go-github/v60/github/repos_collaborators.go b/vendor/github.com/google/go-github/v62/github/repos_collaborators.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/repos_collaborators.go rename to vendor/github.com/google/go-github/v62/github/repos_collaborators.go diff --git a/vendor/github.com/google/go-github/v60/github/repos_comments.go b/vendor/github.com/google/go-github/v62/github/repos_comments.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/repos_comments.go rename to vendor/github.com/google/go-github/v62/github/repos_comments.go diff --git a/vendor/github.com/google/go-github/v60/github/repos_commits.go b/vendor/github.com/google/go-github/v62/github/repos_commits.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/repos_commits.go rename to vendor/github.com/google/go-github/v62/github/repos_commits.go diff --git a/vendor/github.com/google/go-github/v60/github/repos_community_health.go b/vendor/github.com/google/go-github/v62/github/repos_community_health.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/repos_community_health.go rename to vendor/github.com/google/go-github/v62/github/repos_community_health.go diff --git a/vendor/github.com/google/go-github/v60/github/repos_contents.go b/vendor/github.com/google/go-github/v62/github/repos_contents.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/repos_contents.go rename to vendor/github.com/google/go-github/v62/github/repos_contents.go diff --git a/vendor/github.com/google/go-github/v60/github/repos_deployment_branch_policies.go b/vendor/github.com/google/go-github/v62/github/repos_deployment_branch_policies.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/repos_deployment_branch_policies.go rename to vendor/github.com/google/go-github/v62/github/repos_deployment_branch_policies.go diff --git a/vendor/github.com/google/go-github/v60/github/repos_deployment_protection_rules.go b/vendor/github.com/google/go-github/v62/github/repos_deployment_protection_rules.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/repos_deployment_protection_rules.go rename to vendor/github.com/google/go-github/v62/github/repos_deployment_protection_rules.go diff --git a/vendor/github.com/google/go-github/v60/github/repos_deployments.go b/vendor/github.com/google/go-github/v62/github/repos_deployments.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/repos_deployments.go rename to vendor/github.com/google/go-github/v62/github/repos_deployments.go diff --git a/vendor/github.com/google/go-github/v60/github/repos_environments.go 
b/vendor/github.com/google/go-github/v62/github/repos_environments.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/repos_environments.go rename to vendor/github.com/google/go-github/v62/github/repos_environments.go diff --git a/vendor/github.com/google/go-github/v60/github/repos_forks.go b/vendor/github.com/google/go-github/v62/github/repos_forks.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/repos_forks.go rename to vendor/github.com/google/go-github/v62/github/repos_forks.go diff --git a/vendor/github.com/google/go-github/v60/github/repos_hooks.go b/vendor/github.com/google/go-github/v62/github/repos_hooks.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/repos_hooks.go rename to vendor/github.com/google/go-github/v62/github/repos_hooks.go diff --git a/vendor/github.com/google/go-github/v60/github/repos_hooks_configuration.go b/vendor/github.com/google/go-github/v62/github/repos_hooks_configuration.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/repos_hooks_configuration.go rename to vendor/github.com/google/go-github/v62/github/repos_hooks_configuration.go diff --git a/vendor/github.com/google/go-github/v60/github/repos_hooks_deliveries.go b/vendor/github.com/google/go-github/v62/github/repos_hooks_deliveries.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/repos_hooks_deliveries.go rename to vendor/github.com/google/go-github/v62/github/repos_hooks_deliveries.go diff --git a/vendor/github.com/google/go-github/v60/github/repos_invitations.go b/vendor/github.com/google/go-github/v62/github/repos_invitations.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/repos_invitations.go rename to vendor/github.com/google/go-github/v62/github/repos_invitations.go diff --git a/vendor/github.com/google/go-github/v60/github/repos_keys.go b/vendor/github.com/google/go-github/v62/github/repos_keys.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/repos_keys.go rename to vendor/github.com/google/go-github/v62/github/repos_keys.go diff --git a/vendor/github.com/google/go-github/v60/github/repos_lfs.go b/vendor/github.com/google/go-github/v62/github/repos_lfs.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/repos_lfs.go rename to vendor/github.com/google/go-github/v62/github/repos_lfs.go diff --git a/vendor/github.com/google/go-github/v60/github/repos_merging.go b/vendor/github.com/google/go-github/v62/github/repos_merging.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/repos_merging.go rename to vendor/github.com/google/go-github/v62/github/repos_merging.go diff --git a/vendor/github.com/google/go-github/v60/github/repos_pages.go b/vendor/github.com/google/go-github/v62/github/repos_pages.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/repos_pages.go rename to vendor/github.com/google/go-github/v62/github/repos_pages.go diff --git a/vendor/github.com/google/go-github/v60/github/repos_prereceive_hooks.go b/vendor/github.com/google/go-github/v62/github/repos_prereceive_hooks.go similarity index 93% rename from vendor/github.com/google/go-github/v60/github/repos_prereceive_hooks.go rename to vendor/github.com/google/go-github/v62/github/repos_prereceive_hooks.go index b1d3f3c818d..7d85c873620 100644 --- 
a/vendor/github.com/google/go-github/v60/github/repos_prereceive_hooks.go +++ b/vendor/github.com/google/go-github/v62/github/repos_prereceive_hooks.go @@ -24,7 +24,7 @@ func (p PreReceiveHook) String() string { // ListPreReceiveHooks lists all pre-receive hooks for the specified repository. // -// GitHub API docs: https://docs.github.com/enterprise-server@3.11/rest/enterprise-admin/repo-pre-receive-hooks#list-pre-receive-hooks-for-a-repository +// GitHub API docs: https://docs.github.com/enterprise-server@3.12/rest/enterprise-admin/repo-pre-receive-hooks#list-pre-receive-hooks-for-a-repository // //meta:operation GET /repos/{owner}/{repo}/pre-receive-hooks func (s *RepositoriesService) ListPreReceiveHooks(ctx context.Context, owner, repo string, opts *ListOptions) ([]*PreReceiveHook, *Response, error) { @@ -53,7 +53,7 @@ func (s *RepositoriesService) ListPreReceiveHooks(ctx context.Context, owner, re // GetPreReceiveHook returns a single specified pre-receive hook. // -// GitHub API docs: https://docs.github.com/enterprise-server@3.11/rest/enterprise-admin/repo-pre-receive-hooks#get-a-pre-receive-hook-for-a-repository +// GitHub API docs: https://docs.github.com/enterprise-server@3.12/rest/enterprise-admin/repo-pre-receive-hooks#get-a-pre-receive-hook-for-a-repository // //meta:operation GET /repos/{owner}/{repo}/pre-receive-hooks/{pre_receive_hook_id} func (s *RepositoriesService) GetPreReceiveHook(ctx context.Context, owner, repo string, id int64) (*PreReceiveHook, *Response, error) { @@ -77,7 +77,7 @@ func (s *RepositoriesService) GetPreReceiveHook(ctx context.Context, owner, repo // UpdatePreReceiveHook updates a specified pre-receive hook. // -// GitHub API docs: https://docs.github.com/enterprise-server@3.11/rest/enterprise-admin/repo-pre-receive-hooks#update-pre-receive-hook-enforcement-for-a-repository +// GitHub API docs: https://docs.github.com/enterprise-server@3.12/rest/enterprise-admin/repo-pre-receive-hooks#update-pre-receive-hook-enforcement-for-a-repository // //meta:operation PATCH /repos/{owner}/{repo}/pre-receive-hooks/{pre_receive_hook_id} func (s *RepositoriesService) UpdatePreReceiveHook(ctx context.Context, owner, repo string, id int64, hook *PreReceiveHook) (*PreReceiveHook, *Response, error) { @@ -101,7 +101,7 @@ func (s *RepositoriesService) UpdatePreReceiveHook(ctx context.Context, owner, r // DeletePreReceiveHook deletes a specified pre-receive hook. 
// -// GitHub API docs: https://docs.github.com/enterprise-server@3.11/rest/enterprise-admin/repo-pre-receive-hooks#remove-pre-receive-hook-enforcement-for-a-repository +// GitHub API docs: https://docs.github.com/enterprise-server@3.12/rest/enterprise-admin/repo-pre-receive-hooks#remove-pre-receive-hook-enforcement-for-a-repository // //meta:operation DELETE /repos/{owner}/{repo}/pre-receive-hooks/{pre_receive_hook_id} func (s *RepositoriesService) DeletePreReceiveHook(ctx context.Context, owner, repo string, id int64) (*Response, error) { diff --git a/vendor/github.com/google/go-github/v60/github/repos_projects.go b/vendor/github.com/google/go-github/v62/github/repos_projects.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/repos_projects.go rename to vendor/github.com/google/go-github/v62/github/repos_projects.go diff --git a/vendor/github.com/google/go-github/v60/github/repos_properties.go b/vendor/github.com/google/go-github/v62/github/repos_properties.go similarity index 53% rename from vendor/github.com/google/go-github/v60/github/repos_properties.go rename to vendor/github.com/google/go-github/v62/github/repos_properties.go index 5a8626c4530..5b12bc8b30e 100644 --- a/vendor/github.com/google/go-github/v60/github/repos_properties.go +++ b/vendor/github.com/google/go-github/v62/github/repos_properties.go @@ -31,3 +31,30 @@ func (s *RepositoriesService) GetAllCustomPropertyValues(ctx context.Context, or return customPropertyValues, resp, nil } + +// CreateOrUpdateCustomProperties creates new or updates existing custom property values for a repository. +// +// GitHub API docs: https://docs.github.com/rest/repos/custom-properties#create-or-update-custom-property-values-for-a-repository +// +//meta:operation PATCH /repos/{owner}/{repo}/properties/values +func (s *RepositoriesService) CreateOrUpdateCustomProperties(ctx context.Context, org, repo string, customPropertyValues []*CustomPropertyValue) (*Response, error) { + u := fmt.Sprintf("repos/%v/%v/properties/values", org, repo) + + params := struct { + Properties []*CustomPropertyValue `json:"properties"` + }{ + Properties: customPropertyValues, + } + + req, err := s.client.NewRequest("PATCH", u, params) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + + return resp, nil +} diff --git a/vendor/github.com/google/go-github/v60/github/repos_releases.go b/vendor/github.com/google/go-github/v62/github/repos_releases.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/repos_releases.go rename to vendor/github.com/google/go-github/v62/github/repos_releases.go diff --git a/vendor/github.com/google/go-github/v60/github/repos_rules.go b/vendor/github.com/google/go-github/v62/github/repos_rules.go similarity index 95% rename from vendor/github.com/google/go-github/v60/github/repos_rules.go rename to vendor/github.com/google/go-github/v62/github/repos_rules.go index 479806c2ee9..6f046a356a6 100644 --- a/vendor/github.com/google/go-github/v60/github/repos_rules.go +++ b/vendor/github.com/google/go-github/v62/github/repos_rules.go @@ -112,8 +112,11 @@ type RequiredWorkflowsRuleParameters struct { // RepositoryRule represents a GitHub Rule. 
type RepositoryRule struct { - Type string `json:"type"` - Parameters *json.RawMessage `json:"parameters,omitempty"` + Type string `json:"type"` + Parameters *json.RawMessage `json:"parameters,omitempty"` + RulesetSourceType string `json:"ruleset_source_type"` + RulesetSource string `json:"ruleset_source"` + RulesetID int64 `json:"ruleset_id"` } // UnmarshalJSON implements the json.Unmarshaler interface. @@ -125,10 +128,13 @@ func (r *RepositoryRule) UnmarshalJSON(data []byte) error { return err } + r.RulesetID = RepositoryRule.RulesetID + r.RulesetSourceType = RepositoryRule.RulesetSourceType + r.RulesetSource = RepositoryRule.RulesetSource r.Type = RepositoryRule.Type switch RepositoryRule.Type { - case "creation", "deletion", "required_linear_history", "required_signatures", "non_fast_forward": + case "creation", "deletion", "merge_queue", "non_fast_forward", "required_linear_history", "required_signatures": r.Parameters = nil case "update": if RepositoryRule.Parameters == nil { @@ -198,12 +204,19 @@ func (r *RepositoryRule) UnmarshalJSON(data []byte) error { default: r.Type = "" r.Parameters = nil - return fmt.Errorf("RepositoryRule.Type %T is not yet implemented, unable to unmarshal", RepositoryRule.Type) + return fmt.Errorf("RepositoryRule.Type %q is not yet implemented, unable to unmarshal (%#v)", RepositoryRule.Type, RepositoryRule) } return nil } +// NewMergeQueueRule creates a rule to only allow merges via a merge queue. +func NewMergeQueueRule() (rule *RepositoryRule) { + return &RepositoryRule{ + Type: "merge_queue", + } +} + // NewCreationRule creates a rule to only allow users with bypass permission to create matching refs. func NewCreationRule() (rule *RepositoryRule) { return &RepositoryRule{ diff --git a/vendor/github.com/google/go-github/v60/github/repos_stats.go b/vendor/github.com/google/go-github/v62/github/repos_stats.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/repos_stats.go rename to vendor/github.com/google/go-github/v62/github/repos_stats.go diff --git a/vendor/github.com/google/go-github/v60/github/repos_statuses.go b/vendor/github.com/google/go-github/v62/github/repos_statuses.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/repos_statuses.go rename to vendor/github.com/google/go-github/v62/github/repos_statuses.go diff --git a/vendor/github.com/google/go-github/v60/github/repos_tags.go b/vendor/github.com/google/go-github/v62/github/repos_tags.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/repos_tags.go rename to vendor/github.com/google/go-github/v62/github/repos_tags.go diff --git a/vendor/github.com/google/go-github/v60/github/repos_traffic.go b/vendor/github.com/google/go-github/v62/github/repos_traffic.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/repos_traffic.go rename to vendor/github.com/google/go-github/v62/github/repos_traffic.go diff --git a/vendor/github.com/google/go-github/v60/github/scim.go b/vendor/github.com/google/go-github/v62/github/scim.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/scim.go rename to vendor/github.com/google/go-github/v62/github/scim.go diff --git a/vendor/github.com/google/go-github/v60/github/search.go b/vendor/github.com/google/go-github/v62/github/search.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/search.go rename to vendor/github.com/google/go-github/v62/github/search.go diff --git 
a/vendor/github.com/google/go-github/v60/github/secret_scanning.go b/vendor/github.com/google/go-github/v62/github/secret_scanning.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/secret_scanning.go rename to vendor/github.com/google/go-github/v62/github/secret_scanning.go diff --git a/vendor/github.com/google/go-github/v60/github/security_advisories.go b/vendor/github.com/google/go-github/v62/github/security_advisories.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/security_advisories.go rename to vendor/github.com/google/go-github/v62/github/security_advisories.go diff --git a/vendor/github.com/google/go-github/v60/github/strings.go b/vendor/github.com/google/go-github/v62/github/strings.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/strings.go rename to vendor/github.com/google/go-github/v62/github/strings.go diff --git a/vendor/github.com/google/go-github/v60/github/teams.go b/vendor/github.com/google/go-github/v62/github/teams.go similarity index 99% rename from vendor/github.com/google/go-github/v60/github/teams.go rename to vendor/github.com/google/go-github/v62/github/teams.go index fd22b792a66..0f6cc9d16e4 100644 --- a/vendor/github.com/google/go-github/v60/github/teams.go +++ b/vendor/github.com/google/go-github/v62/github/teams.go @@ -155,6 +155,9 @@ type NewTeam struct { RepoNames []string `json:"repo_names,omitempty"` ParentTeamID *int64 `json:"parent_team_id,omitempty"` + // NotificationSetting can be one of: "notifications_enabled", "notifications_disabled". + NotificationSetting *string `json:"notification_setting,omitempty"` + // Deprecated: Permission is deprecated when creating or editing a team in an org // using the new GitHub permission model. 
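The teams.go hunk above adds NewTeam.NotificationSetting. A sketch, not part of this patch, of creating a team with notifications disabled; TeamsService.CreateTeam and the github.String helper are assumed from the existing API, and the token, organization, and team names are placeholders.

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/google/go-github/v62/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil).WithAuthToken(os.Getenv("GITHUB_TOKEN"))

	team := github.NewTeam{
		Name:        "platform",
		Description: github.String("Platform engineering"),
		// New field; per the comment in the hunk above, one of
		// "notifications_enabled" or "notifications_disabled".
		NotificationSetting: github.String("notifications_disabled"),
	}
	created, _, err := client.Teams.CreateTeam(ctx, "octo-org", team)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created team:", created.GetSlug())
}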
It no longer identifies the // permission a team has on its repos, but only specifies the default diff --git a/vendor/github.com/google/go-github/v60/github/teams_discussion_comments.go b/vendor/github.com/google/go-github/v62/github/teams_discussion_comments.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/teams_discussion_comments.go rename to vendor/github.com/google/go-github/v62/github/teams_discussion_comments.go diff --git a/vendor/github.com/google/go-github/v60/github/teams_discussions.go b/vendor/github.com/google/go-github/v62/github/teams_discussions.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/teams_discussions.go rename to vendor/github.com/google/go-github/v62/github/teams_discussions.go diff --git a/vendor/github.com/google/go-github/v60/github/teams_members.go b/vendor/github.com/google/go-github/v62/github/teams_members.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/teams_members.go rename to vendor/github.com/google/go-github/v62/github/teams_members.go diff --git a/vendor/github.com/google/go-github/v60/github/timestamp.go b/vendor/github.com/google/go-github/v62/github/timestamp.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/timestamp.go rename to vendor/github.com/google/go-github/v62/github/timestamp.go diff --git a/vendor/github.com/google/go-github/v60/github/users.go b/vendor/github.com/google/go-github/v62/github/users.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/users.go rename to vendor/github.com/google/go-github/v62/github/users.go diff --git a/vendor/github.com/google/go-github/v60/github/users_administration.go b/vendor/github.com/google/go-github/v62/github/users_administration.go similarity index 91% rename from vendor/github.com/google/go-github/v60/github/users_administration.go rename to vendor/github.com/google/go-github/v62/github/users_administration.go index 5b9e1de8c60..2c86af7336d 100644 --- a/vendor/github.com/google/go-github/v60/github/users_administration.go +++ b/vendor/github.com/google/go-github/v62/github/users_administration.go @@ -12,7 +12,7 @@ import ( // PromoteSiteAdmin promotes a user to a site administrator of a GitHub Enterprise instance. // -// GitHub API docs: https://docs.github.com/enterprise-server@3.11/rest/enterprise-admin/users#promote-a-user-to-be-a-site-administrator +// GitHub API docs: https://docs.github.com/enterprise-server@3.12/rest/enterprise-admin/users#promote-a-user-to-be-a-site-administrator // //meta:operation PUT /users/{username}/site_admin func (s *UsersService) PromoteSiteAdmin(ctx context.Context, user string) (*Response, error) { @@ -28,7 +28,7 @@ func (s *UsersService) PromoteSiteAdmin(ctx context.Context, user string) (*Resp // DemoteSiteAdmin demotes a user from site administrator of a GitHub Enterprise instance. // -// GitHub API docs: https://docs.github.com/enterprise-server@3.11/rest/enterprise-admin/users#demote-a-site-administrator +// GitHub API docs: https://docs.github.com/enterprise-server@3.12/rest/enterprise-admin/users#demote-a-site-administrator // //meta:operation DELETE /users/{username}/site_admin func (s *UsersService) DemoteSiteAdmin(ctx context.Context, user string) (*Response, error) { @@ -49,7 +49,7 @@ type UserSuspendOptions struct { // Suspend a user on a GitHub Enterprise instance. 
// -// GitHub API docs: https://docs.github.com/enterprise-server@3.11/rest/enterprise-admin/users#suspend-a-user +// GitHub API docs: https://docs.github.com/enterprise-server@3.12/rest/enterprise-admin/users#suspend-a-user // //meta:operation PUT /users/{username}/suspended func (s *UsersService) Suspend(ctx context.Context, user string, opts *UserSuspendOptions) (*Response, error) { @@ -65,7 +65,7 @@ func (s *UsersService) Suspend(ctx context.Context, user string, opts *UserSuspe // Unsuspend a user on a GitHub Enterprise instance. // -// GitHub API docs: https://docs.github.com/enterprise-server@3.11/rest/enterprise-admin/users#unsuspend-a-user +// GitHub API docs: https://docs.github.com/enterprise-server@3.12/rest/enterprise-admin/users#unsuspend-a-user // //meta:operation DELETE /users/{username}/suspended func (s *UsersService) Unsuspend(ctx context.Context, user string) (*Response, error) { diff --git a/vendor/github.com/google/go-github/v60/github/users_blocking.go b/vendor/github.com/google/go-github/v62/github/users_blocking.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/users_blocking.go rename to vendor/github.com/google/go-github/v62/github/users_blocking.go diff --git a/vendor/github.com/google/go-github/v60/github/users_emails.go b/vendor/github.com/google/go-github/v62/github/users_emails.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/users_emails.go rename to vendor/github.com/google/go-github/v62/github/users_emails.go diff --git a/vendor/github.com/google/go-github/v60/github/users_followers.go b/vendor/github.com/google/go-github/v62/github/users_followers.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/users_followers.go rename to vendor/github.com/google/go-github/v62/github/users_followers.go diff --git a/vendor/github.com/google/go-github/v60/github/users_gpg_keys.go b/vendor/github.com/google/go-github/v62/github/users_gpg_keys.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/users_gpg_keys.go rename to vendor/github.com/google/go-github/v62/github/users_gpg_keys.go diff --git a/vendor/github.com/google/go-github/v60/github/users_keys.go b/vendor/github.com/google/go-github/v62/github/users_keys.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/users_keys.go rename to vendor/github.com/google/go-github/v62/github/users_keys.go diff --git a/vendor/github.com/google/go-github/v60/github/users_packages.go b/vendor/github.com/google/go-github/v62/github/users_packages.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/users_packages.go rename to vendor/github.com/google/go-github/v62/github/users_packages.go diff --git a/vendor/github.com/google/go-github/v60/github/users_projects.go b/vendor/github.com/google/go-github/v62/github/users_projects.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/users_projects.go rename to vendor/github.com/google/go-github/v62/github/users_projects.go diff --git a/vendor/github.com/google/go-github/v60/github/users_ssh_signing_keys.go b/vendor/github.com/google/go-github/v62/github/users_ssh_signing_keys.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/users_ssh_signing_keys.go rename to vendor/github.com/google/go-github/v62/github/users_ssh_signing_keys.go diff --git a/vendor/github.com/google/go-github/v60/github/with_appengine.go 
b/vendor/github.com/google/go-github/v62/github/with_appengine.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/with_appengine.go rename to vendor/github.com/google/go-github/v62/github/with_appengine.go diff --git a/vendor/github.com/google/go-github/v60/github/without_appengine.go b/vendor/github.com/google/go-github/v62/github/without_appengine.go similarity index 100% rename from vendor/github.com/google/go-github/v60/github/without_appengine.go rename to vendor/github.com/google/go-github/v62/github/without_appengine.go diff --git a/vendor/github.com/google/pprof/profile/encode.go b/vendor/github.com/google/pprof/profile/encode.go index 182c926b908..860bb304c34 100644 --- a/vendor/github.com/google/pprof/profile/encode.go +++ b/vendor/github.com/google/pprof/profile/encode.go @@ -530,6 +530,7 @@ func (p *Line) decoder() []decoder { func (p *Line) encode(b *buffer) { encodeUint64Opt(b, 1, p.functionIDX) encodeInt64Opt(b, 2, p.Line) + encodeInt64Opt(b, 3, p.Column) } var lineDecoder = []decoder{ @@ -538,6 +539,8 @@ var lineDecoder = []decoder{ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) }, // optional int64 line = 2 func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) }, + // optional int64 column = 3 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Column) }, } func (p *Function) decoder() []decoder { diff --git a/vendor/github.com/google/pprof/profile/legacy_java_profile.go b/vendor/github.com/google/pprof/profile/legacy_java_profile.go index 91f45e53c6c..4580bab1839 100644 --- a/vendor/github.com/google/pprof/profile/legacy_java_profile.go +++ b/vendor/github.com/google/pprof/profile/legacy_java_profile.go @@ -56,7 +56,7 @@ func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte } // Strip out addresses for better merge. - if err = p.Aggregate(true, true, true, true, false); err != nil { + if err = p.Aggregate(true, true, true, true, false, false); err != nil { return nil, err } @@ -99,7 +99,7 @@ func parseJavaProfile(b []byte) (*Profile, error) { } // Strip out addresses for better merge. 
- if err = p.Aggregate(true, true, true, true, false); err != nil { + if err = p.Aggregate(true, true, true, true, false, false); err != nil { return nil, err } diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go index 4b66282cb8e..eee0132e740 100644 --- a/vendor/github.com/google/pprof/profile/merge.go +++ b/vendor/github.com/google/pprof/profile/merge.go @@ -326,12 +326,13 @@ func (l *Location) key() locationKey { key.addr -= l.Mapping.Start key.mappingID = l.Mapping.ID } - lines := make([]string, len(l.Line)*2) + lines := make([]string, len(l.Line)*3) for i, line := range l.Line { if line.Function != nil { lines[i*2] = strconv.FormatUint(line.Function.ID, 16) } lines[i*2+1] = strconv.FormatInt(line.Line, 16) + lines[i*2+2] = strconv.FormatInt(line.Column, 16) } key.lines = strings.Join(lines, "|") return key @@ -418,6 +419,7 @@ func (pm *profileMerger) mapLine(src Line) Line { ln := Line{ Function: pm.mapFunction(src.Function), Line: src.Line, + Column: src.Column, } return ln } diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go index 60ef7e92687..5551eb0bfa4 100644 --- a/vendor/github.com/google/pprof/profile/profile.go +++ b/vendor/github.com/google/pprof/profile/profile.go @@ -145,6 +145,7 @@ type Location struct { type Line struct { Function *Function Line int64 + Column int64 functionIDX uint64 } @@ -436,7 +437,7 @@ func (p *Profile) CheckValid() error { // Aggregate merges the locations in the profile into equivalence // classes preserving the request attributes. It also updates the // samples to point to the merged locations. -func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address bool) error { +func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, columnnumber, address bool) error { for _, m := range p.Mapping { m.HasInlineFrames = m.HasInlineFrames && inlineFrame m.HasFunctions = m.HasFunctions && function @@ -458,7 +459,7 @@ func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address } // Aggregate locations - if !inlineFrame || !address || !linenumber { + if !inlineFrame || !address || !linenumber || !columnnumber { for _, l := range p.Location { if !inlineFrame && len(l.Line) > 1 { l.Line = l.Line[len(l.Line)-1:] @@ -466,6 +467,12 @@ func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address if !linenumber { for i := range l.Line { l.Line[i].Line = 0 + l.Line[i].Column = 0 + } + } + if !columnnumber { + for i := range l.Line { + l.Line[i].Column = 0 } } if !address { @@ -627,10 +634,11 @@ func (l *Location) string() string { for li := range l.Line { lnStr := "??" if fn := l.Line[li].Function; fn != nil { - lnStr = fmt.Sprintf("%s %s:%d s=%d", + lnStr = fmt.Sprintf("%s %s:%d:%d s=%d", fn.Name, fn.Filename, l.Line[li].Line, + l.Line[li].Column, fn.StartLine) if fn.Name != fn.SystemName { lnStr = lnStr + "(" + fn.SystemName + ")" @@ -839,7 +847,7 @@ func (p *Profile) HasFileLines() bool { // "[vdso]", [vsyscall]" and some others, see the code. func (m *Mapping) Unsymbolizable() bool { name := filepath.Base(m.File) - return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") + return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") || m.File == "//anon" } // Copy makes a fully independent copy of a profile. 
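The pprof bump above adds a Column field to profile.Line and threads a new columnnumber flag through Profile.Aggregate, so the former five-argument calls (as in legacy_java_profile.go) now pass six booleans. A sketch, not part of this patch, of calling the new signature on a parsed profile; the input path is a placeholder.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/google/pprof/profile"
)

func main() {
	f, err := os.Open("cpu.pprof") // placeholder profile file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	p, err := profile.Parse(f)
	if err != nil {
		log.Fatal(err)
	}

	// New signature: Aggregate(inlineFrame, function, filename, linenumber, columnnumber, address).
	// Passing false for columnnumber and address mirrors the updated vendored calls above.
	if err := p.Aggregate(true, true, true, true, false, false); err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(p.Location), "locations after aggregation")
}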
diff --git a/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go b/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go index 16278a1d995..fcd049de922 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/common/common.proto @@ -145,8 +145,8 @@ type Identity struct { // *Identity_SpiffeId // *Identity_Hostname // *Identity_Uid - // *Identity_MdbUsername - // *Identity_GaiaId + // *Identity_Username + // *Identity_GcpId IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"` // Additional identity-specific attributes. Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` @@ -212,16 +212,16 @@ func (x *Identity) GetUid() string { return "" } -func (x *Identity) GetMdbUsername() string { - if x, ok := x.GetIdentityOneof().(*Identity_MdbUsername); ok { - return x.MdbUsername +func (x *Identity) GetUsername() string { + if x, ok := x.GetIdentityOneof().(*Identity_Username); ok { + return x.Username } return "" } -func (x *Identity) GetGaiaId() string { - if x, ok := x.GetIdentityOneof().(*Identity_GaiaId); ok { - return x.GaiaId +func (x *Identity) GetGcpId() string { + if x, ok := x.GetIdentityOneof().(*Identity_GcpId); ok { + return x.GcpId } return "" } @@ -252,14 +252,14 @@ type Identity_Uid struct { Uid string `protobuf:"bytes,4,opt,name=uid,proto3,oneof"` } -type Identity_MdbUsername struct { - // The MDB username of a connection endpoint. - MdbUsername string `protobuf:"bytes,5,opt,name=mdb_username,json=mdbUsername,proto3,oneof"` +type Identity_Username struct { + // The username of a connection endpoint. + Username string `protobuf:"bytes,5,opt,name=username,proto3,oneof"` } -type Identity_GaiaId struct { - // The Gaia ID of a connection endpoint. - GaiaId string `protobuf:"bytes,6,opt,name=gaia_id,json=gaiaId,proto3,oneof"` +type Identity_GcpId struct { + // The GCP ID of a connection endpoint. 
+ GcpId string `protobuf:"bytes,6,opt,name=gcp_id,json=gcpId,proto3,oneof"` } func (*Identity_SpiffeId) isIdentity_IdentityOneof() {} @@ -268,9 +268,9 @@ func (*Identity_Hostname) isIdentity_IdentityOneof() {} func (*Identity_Uid) isIdentity_IdentityOneof() {} -func (*Identity_MdbUsername) isIdentity_IdentityOneof() {} +func (*Identity_Username) isIdentity_IdentityOneof() {} -func (*Identity_GaiaId) isIdentity_IdentityOneof() {} +func (*Identity_GcpId) isIdentity_IdentityOneof() {} var File_internal_proto_common_common_proto protoreflect.FileDescriptor @@ -278,38 +278,37 @@ var file_internal_proto_common_common_proto_rawDesc = []byte{ 0x0a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, - 0xb1, 0x02, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x09, + 0xa8, 0x02, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x09, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x03, 0x75, 0x69, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x23, 0x0a, - 0x0c, 0x6d, 0x64, 0x62, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x6d, 0x64, 0x62, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x07, 0x67, 0x61, 0x69, 0x61, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x67, 0x61, 0x69, 0x61, 0x49, 0x64, 0x12, 0x43, 0x0a, - 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, - 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, - 0x65, 0x6f, 0x66, 0x2a, 0x5b, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, - 0x74, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, - 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, - 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, - 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x48, 0x41, 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, - 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x02, - 0x2a, 0x24, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 
0x6e, 0x12, 0x0a, - 0x0a, 0x06, 0x54, 0x4c, 0x53, 0x31, 0x5f, 0x32, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x4c, - 0x53, 0x31, 0x5f, 0x33, 0x10, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x1c, 0x0a, + 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x06, 0x67, + 0x63, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x67, + 0x63, 0x70, 0x49, 0x64, 0x12, 0x43, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0x5b, 0x0a, 0x0b, 0x43, 0x69, + 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, 0x53, + 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, + 0x00, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, 0x43, 0x4d, + 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x48, 0x41, + 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, + 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x02, 0x2a, 0x24, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x4c, 0x53, 0x31, 0x5f, 0x32, 0x10, + 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x4c, 0x53, 0x31, 0x5f, 0x33, 0x10, 0x01, 0x42, 0x36, 0x5a, + 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, 0x5f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -326,7 +325,7 @@ func file_internal_proto_common_common_proto_rawDescGZIP() []byte { var file_internal_proto_common_common_proto_enumTypes = make([]protoimpl.EnumInfo, 2) var file_internal_proto_common_common_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_internal_proto_common_common_proto_goTypes = []interface{}{ +var file_internal_proto_common_common_proto_goTypes = []any{ (Ciphersuite)(0), // 0: s2a.proto.Ciphersuite (TLSVersion)(0), // 1: 
s2a.proto.TLSVersion (*Identity)(nil), // 2: s2a.proto.Identity @@ -347,7 +346,7 @@ func file_internal_proto_common_common_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_internal_proto_common_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_common_common_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Identity); i { case 0: return &v.state @@ -360,12 +359,12 @@ func file_internal_proto_common_common_proto_init() { } } } - file_internal_proto_common_common_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_internal_proto_common_common_proto_msgTypes[0].OneofWrappers = []any{ (*Identity_SpiffeId)(nil), (*Identity_Hostname)(nil), (*Identity_Uid)(nil), - (*Identity_MdbUsername)(nil), - (*Identity_GaiaId)(nil), + (*Identity_Username)(nil), + (*Identity_GcpId)(nil), } type x struct{} out := protoimpl.TypeBuilder{ diff --git a/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go b/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go index f4f763ae102..2af3ee3dc1c 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/s2a_context/s2a_context.proto @@ -209,7 +209,7 @@ func file_internal_proto_s2a_context_s2a_context_proto_rawDescGZIP() []byte { } var file_internal_proto_s2a_context_s2a_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_internal_proto_s2a_context_s2a_context_proto_goTypes = []interface{}{ +var file_internal_proto_s2a_context_s2a_context_proto_goTypes = []any{ (*S2AContext)(nil), // 0: s2a.proto.S2AContext (common_go_proto.TLSVersion)(0), // 1: s2a.proto.TLSVersion (common_go_proto.Ciphersuite)(0), // 2: s2a.proto.Ciphersuite @@ -233,7 +233,7 @@ func file_internal_proto_s2a_context_s2a_context_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*S2AContext); i { case 0: return &v.state diff --git a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go index 0a86ebee592..8919232fd88 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/s2a/s2a.proto @@ -1171,7 +1171,7 @@ func file_internal_proto_s2a_s2a_proto_rawDescGZIP() []byte { } var file_internal_proto_s2a_s2a_proto_msgTypes = make([]protoimpl.MessageInfo, 10) -var file_internal_proto_s2a_s2a_proto_goTypes = []interface{}{ +var file_internal_proto_s2a_s2a_proto_goTypes = []any{ (*AuthenticationMechanism)(nil), // 0: s2a.proto.AuthenticationMechanism (*ClientSessionStartReq)(nil), // 1: s2a.proto.ClientSessionStartReq (*ServerSessionStartReq)(nil), // 2: s2a.proto.ServerSessionStartReq @@ -1226,7 +1226,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_internal_proto_s2a_s2a_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*AuthenticationMechanism); i { case 0: return &v.state @@ -1238,7 +1238,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ClientSessionStartReq); i { case 0: return &v.state @@ -1250,7 +1250,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ServerSessionStartReq); i { case 0: return &v.state @@ -1262,7 +1262,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*SessionNextReq); i { case 0: return &v.state @@ -1274,7 +1274,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*ResumptionTicketReq); i { case 0: return &v.state @@ -1286,7 +1286,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*SessionReq); i { case 0: return &v.state @@ -1298,7 +1298,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*SessionState); i { case 0: return &v.state @@ -1310,7 +1310,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*SessionResult); i { case 0: return &v.state @@ -1322,7 +1322,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[8].Exporter = 
func(v any, i int) any { switch v := v.(*SessionStatus); i { case 0: return &v.state @@ -1334,7 +1334,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*SessionResp); i { case 0: return &v.state @@ -1347,10 +1347,10 @@ func file_internal_proto_s2a_s2a_proto_init() { } } } - file_internal_proto_s2a_s2a_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_internal_proto_s2a_s2a_proto_msgTypes[0].OneofWrappers = []any{ (*AuthenticationMechanism_Token)(nil), } - file_internal_proto_s2a_s2a_proto_msgTypes[5].OneofWrappers = []interface{}{ + file_internal_proto_s2a_s2a_proto_msgTypes[5].OneofWrappers = []any{ (*SessionReq_ClientStart)(nil), (*SessionReq_ServerStart)(nil), (*SessionReq_Next)(nil), diff --git a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go index 0fa582fc874..8fac3841be5 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 +// - protoc-gen-go-grpc v1.4.0 // - protoc v3.21.12 // source: internal/proto/s2a/s2a.proto @@ -29,8 +29,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.62.0 or later. +const _ = grpc.SupportPackageIsVersion8 const ( S2AService_SetUpSession_FullMethodName = "/s2a.proto.S2AService/SetUpSession" @@ -61,11 +61,12 @@ func NewS2AServiceClient(cc grpc.ClientConnInterface) S2AServiceClient { } func (c *s2AServiceClient) SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) { - stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, opts...) + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &s2AServiceSetUpSessionClient{stream} + x := &s2AServiceSetUpSessionClient{ClientStream: stream} return x, nil } @@ -129,7 +130,7 @@ func RegisterS2AServiceServer(s grpc.ServiceRegistrar, srv S2AServiceServer) { } func _S2AService_SetUpSession_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{stream}) + return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{ServerStream: stream}) } type S2AService_SetUpSessionServer interface { diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go index c84bed97748..e9aa5d14c0d 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/v2/common/common.proto @@ -256,62 +256,218 @@ func (AlpnProtocol) EnumDescriptor() ([]byte, []int) { return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{3} } +type Identity struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to IdentityOneof: + // + // *Identity_SpiffeId + // *Identity_Hostname + // *Identity_Uid + // *Identity_Username + // *Identity_GcpId + IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"` + // Additional identity-specific attributes. + Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Identity) Reset() { + *x = Identity{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_common_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Identity) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Identity) ProtoMessage() {} + +func (x *Identity) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_common_common_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Identity.ProtoReflect.Descriptor instead. +func (*Identity) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{0} +} + +func (m *Identity) GetIdentityOneof() isIdentity_IdentityOneof { + if m != nil { + return m.IdentityOneof + } + return nil +} + +func (x *Identity) GetSpiffeId() string { + if x, ok := x.GetIdentityOneof().(*Identity_SpiffeId); ok { + return x.SpiffeId + } + return "" +} + +func (x *Identity) GetHostname() string { + if x, ok := x.GetIdentityOneof().(*Identity_Hostname); ok { + return x.Hostname + } + return "" +} + +func (x *Identity) GetUid() string { + if x, ok := x.GetIdentityOneof().(*Identity_Uid); ok { + return x.Uid + } + return "" +} + +func (x *Identity) GetUsername() string { + if x, ok := x.GetIdentityOneof().(*Identity_Username); ok { + return x.Username + } + return "" +} + +func (x *Identity) GetGcpId() string { + if x, ok := x.GetIdentityOneof().(*Identity_GcpId); ok { + return x.GcpId + } + return "" +} + +func (x *Identity) GetAttributes() map[string]string { + if x != nil { + return x.Attributes + } + return nil +} + +type isIdentity_IdentityOneof interface { + isIdentity_IdentityOneof() +} + +type Identity_SpiffeId struct { + // The SPIFFE ID of a connection endpoint. + SpiffeId string `protobuf:"bytes,1,opt,name=spiffe_id,json=spiffeId,proto3,oneof"` +} + +type Identity_Hostname struct { + // The hostname of a connection endpoint. + Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3,oneof"` +} + +type Identity_Uid struct { + // The UID of a connection endpoint. + Uid string `protobuf:"bytes,4,opt,name=uid,proto3,oneof"` +} + +type Identity_Username struct { + // The username of a connection endpoint. + Username string `protobuf:"bytes,5,opt,name=username,proto3,oneof"` +} + +type Identity_GcpId struct { + // The GCP ID of a connection endpoint. 
+ GcpId string `protobuf:"bytes,6,opt,name=gcp_id,json=gcpId,proto3,oneof"` +} + +func (*Identity_SpiffeId) isIdentity_IdentityOneof() {} + +func (*Identity_Hostname) isIdentity_IdentityOneof() {} + +func (*Identity_Uid) isIdentity_IdentityOneof() {} + +func (*Identity_Username) isIdentity_IdentityOneof() {} + +func (*Identity_GcpId) isIdentity_IdentityOneof() {} + var File_internal_proto_v2_common_common_proto protoreflect.FileDescriptor var file_internal_proto_v2_common_common_proto_rawDesc = []byte{ 0x0a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2a, 0xee, 0x02, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, - 0x73, 0x75, 0x69, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, - 0x55, 0x49, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, - 0x10, 0x00, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, - 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, - 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, - 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, - 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, - 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, - 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x39, 0x0a, 0x35, - 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, - 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, - 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, - 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x03, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, 0x50, 0x48, 0x45, - 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, - 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, - 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, + 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x22, 0xab, 0x02, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x09, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, + 0x64, 0x12, 0x1c, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x12, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, + 0x75, 0x69, 0x64, 0x12, 0x1c, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x17, 0x0a, 0x06, 0x67, 0x63, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x48, 0x00, 0x52, 0x05, 0x67, 0x63, 0x70, 0x49, 0x64, 0x12, 0x46, 0x0a, 0x0a, 0x61, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, + 0x2e, 0x73, 
0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, + 0x65, 0x6f, 0x66, 0x2a, 0xee, 0x02, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, + 0x69, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, + 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, + 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, + 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, + 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, + 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, + 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, 0x43, + 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x39, 0x0a, 0x35, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, - 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, - 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x37, 0x0a, - 0x33, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, - 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, 0x43, - 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, - 0x41, 0x32, 0x35, 0x36, 0x10, 0x06, 0x2a, 0x7d, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, - 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, - 0x5f, 0x31, 0x5f, 0x30, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, - 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x31, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x54, - 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x32, 0x10, 0x03, - 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, - 0x31, 0x5f, 0x33, 0x10, 0x04, 0x2a, 0x69, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x4f, 0x4e, 0x4e, 0x45, - 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, - 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, - 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 
0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, - 0x4e, 0x54, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, - 0x2a, 0x79, 0x0a, 0x0c, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, - 0x12, 0x1d, 0x0a, 0x19, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, - 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, - 0x16, 0x0a, 0x12, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, - 0x5f, 0x47, 0x52, 0x50, 0x43, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x50, 0x4e, 0x5f, - 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x32, 0x10, 0x02, - 0x12, 0x19, 0x0a, 0x15, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, - 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x31, 0x5f, 0x31, 0x10, 0x03, 0x42, 0x39, 0x5a, 0x37, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, - 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, 0x43, 0x48, + 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, 0x41, + 0x32, 0x35, 0x36, 0x10, 0x03, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, + 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x57, + 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x5f, + 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, 0x50, 0x48, + 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x52, 0x53, + 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, + 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x37, 0x0a, 0x33, 0x43, + 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, + 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, 0x43, 0x48, 0x41, + 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, 0x41, 0x32, + 0x35, 0x36, 0x10, 0x06, 0x2a, 0x7d, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, + 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, + 0x5f, 0x30, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, + 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x31, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, + 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x32, 0x10, 0x03, 0x12, 0x13, + 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, + 0x33, 0x10, 0x04, 0x2a, 0x69, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x4f, 0x4e, 
0x4e, 0x45, 0x43, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, + 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x2a, 0x79, + 0x0a, 0x0c, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x1d, + 0x0a, 0x19, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, + 0x12, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x47, + 0x52, 0x50, 0x43, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, + 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x32, 0x10, 0x02, 0x12, 0x19, + 0x0a, 0x15, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, + 0x48, 0x54, 0x54, 0x50, 0x31, 0x5f, 0x31, 0x10, 0x03, 0x42, 0x39, 0x5a, 0x37, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, + 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, 0x5f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -327,18 +483,22 @@ func file_internal_proto_v2_common_common_proto_rawDescGZIP() []byte { } var file_internal_proto_v2_common_common_proto_enumTypes = make([]protoimpl.EnumInfo, 4) -var file_internal_proto_v2_common_common_proto_goTypes = []interface{}{ +var file_internal_proto_v2_common_common_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_internal_proto_v2_common_common_proto_goTypes = []any{ (Ciphersuite)(0), // 0: s2a.proto.v2.Ciphersuite (TLSVersion)(0), // 1: s2a.proto.v2.TLSVersion (ConnectionSide)(0), // 2: s2a.proto.v2.ConnectionSide (AlpnProtocol)(0), // 3: s2a.proto.v2.AlpnProtocol + (*Identity)(nil), // 4: s2a.proto.v2.Identity + nil, // 5: s2a.proto.v2.Identity.AttributesEntry } var file_internal_proto_v2_common_common_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 5, // 0: s2a.proto.v2.Identity.attributes:type_name -> s2a.proto.v2.Identity.AttributesEntry + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name } func init() { file_internal_proto_v2_common_common_proto_init() } @@ -346,19 +506,41 @@ func file_internal_proto_v2_common_common_proto_init() { if File_internal_proto_v2_common_common_proto != nil { return } + if !protoimpl.UnsafeEnabled { + file_internal_proto_v2_common_common_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*Identity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + 
return nil + } + } + } + file_internal_proto_v2_common_common_proto_msgTypes[0].OneofWrappers = []any{ + (*Identity_SpiffeId)(nil), + (*Identity_Hostname)(nil), + (*Identity_Uid)(nil), + (*Identity_Username)(nil), + (*Identity_GcpId)(nil), + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_internal_proto_v2_common_common_proto_rawDesc, NumEnums: 4, - NumMessages: 0, + NumMessages: 2, NumExtensions: 0, NumServices: 0, }, GoTypes: file_internal_proto_v2_common_common_proto_goTypes, DependencyIndexes: file_internal_proto_v2_common_common_proto_depIdxs, EnumInfos: file_internal_proto_v2_common_common_proto_enumTypes, + MessageInfos: file_internal_proto_v2_common_common_proto_msgTypes, }.Build() File_internal_proto_v2_common_common_proto = out.File file_internal_proto_v2_common_common_proto_rawDesc = nil diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go index b7fd871c7a7..418331a4bde 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go @@ -14,14 +14,14 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/v2/s2a_context/s2a_context.proto package s2a_context_go_proto import ( - common_go_proto "github.com/google/s2a-go/internal/proto/common_go_proto" + common_go_proto "github.com/google/s2a-go/internal/proto/v2/common_go_proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -64,7 +64,7 @@ type S2AContext struct { // certificate chain was NOT validated successfully. PeerCertificateChainFingerprints []string `protobuf:"bytes,4,rep,name=peer_certificate_chain_fingerprints,json=peerCertificateChainFingerprints,proto3" json:"peer_certificate_chain_fingerprints,omitempty"` // The local identity used during session setup. - LocalIdentity *common_go_proto.Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + LocalIdentity *common_go_proto.Identity `protobuf:"bytes,9,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` // The SHA256 hash of the DER-encoding of the local leaf certificate used in // the handshake. 
LocalLeafCertFingerprint []byte `protobuf:"bytes,6,opt,name=local_leaf_cert_fingerprint,json=localLeafCertFingerprint,proto3" json:"local_leaf_cert_fingerprint,omitempty"` @@ -151,35 +151,36 @@ var file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc = []byte{ 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x1a, - 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0xd9, 0x02, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, - 0x78, 0x74, 0x12, 0x2d, 0x0a, 0x13, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, - 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x53, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, - 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x75, - 0x72, 0x69, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x65, 0x61, 0x66, 0x43, - 0x65, 0x72, 0x74, 0x55, 0x72, 0x69, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x61, 0x66, 0x5f, - 0x63, 0x65, 0x72, 0x74, 0x5f, 0x64, 0x6e, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x44, 0x6e, 0x73, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x4d, 0x0a, 0x23, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, - 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x20, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, - 0x69, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, - 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x12, 0x3d, 0x0a, 0x1b, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, - 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x4c, 0x65, 0x61, 0x66, - 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x42, - 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, - 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2e, 0x70, 
0x72, 0x6f, 0x74, 0x6f, 0x22, 0xee, 0x02, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x2d, 0x0a, 0x13, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, + 0x72, 0x74, 0x5f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x53, 0x70, 0x69, 0x66, + 0x66, 0x65, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, + 0x74, 0x5f, 0x75, 0x72, 0x69, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x65, + 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x55, 0x72, 0x69, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, + 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x64, 0x6e, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, + 0x44, 0x6e, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x4d, 0x0a, 0x23, 0x70, 0x65, 0x65, 0x72, + 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, + 0x69, 0x6e, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x20, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x46, 0x69, 0x6e, 0x67, 0x65, + 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x3d, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x16, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x3d, 0x0a, 0x1b, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, + 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, + 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x4c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, + 0x70, 0x72, 0x69, 0x6e, 0x74, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x07, 0x10, + 0x08, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x42, 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, + 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x67, + 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -195,12 +196,12 @@ func file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescGZIP() []byte { } var file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes = []interface{}{ +var file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes = []any{ (*S2AContext)(nil), // 0: s2a.proto.v2.S2AContext - (*common_go_proto.Identity)(nil), // 1: s2a.proto.Identity + (*common_go_proto.Identity)(nil), // 1: s2a.proto.v2.Identity } var file_internal_proto_v2_s2a_context_s2a_context_proto_depIdxs = []int32{ - 1, // 0: s2a.proto.v2.S2AContext.local_identity:type_name -> s2a.proto.Identity + 1, // 0: s2a.proto.v2.S2AContext.local_identity:type_name -> s2a.proto.v2.Identity 1, // [1:1] is the sub-list for method 
output_type 1, // [1:1] is the sub-list for method input_type 1, // [1:1] is the sub-list for extension type_name @@ -214,7 +215,7 @@ func file_internal_proto_v2_s2a_context_s2a_context_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*S2AContext); i { case 0: return &v.state diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go index e843450c7ed..548f31da2d5 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/v2/s2a/s2a.proto package s2a_go_proto import ( - common_go_proto1 "github.com/google/s2a-go/internal/proto/common_go_proto" common_go_proto "github.com/google/s2a-go/internal/proto/v2/common_go_proto" s2a_context_go_proto "github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -292,6 +291,12 @@ const ( // The connect-to-Google verification mode uses the trust bundle for // connecting to Google, e.g. *.mtls.googleapis.com endpoints. ValidatePeerCertificateChainReq_CONNECT_TO_GOOGLE ValidatePeerCertificateChainReq_VerificationMode = 2 + // Internal use only. + ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_3 ValidatePeerCertificateChainReq_VerificationMode = 3 + // Internal use only. + ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_4 ValidatePeerCertificateChainReq_VerificationMode = 4 + // Internal use only. + ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_5 ValidatePeerCertificateChainReq_VerificationMode = 5 ) // Enum value maps for ValidatePeerCertificateChainReq_VerificationMode. @@ -300,11 +305,17 @@ var ( 0: "UNSPECIFIED", 1: "SPIFFE", 2: "CONNECT_TO_GOOGLE", + 3: "RESERVED_CUSTOM_VERIFICATION_MODE_3", + 4: "RESERVED_CUSTOM_VERIFICATION_MODE_4", + 5: "RESERVED_CUSTOM_VERIFICATION_MODE_5", } ValidatePeerCertificateChainReq_VerificationMode_value = map[string]int32{ - "UNSPECIFIED": 0, - "SPIFFE": 1, - "CONNECT_TO_GOOGLE": 2, + "UNSPECIFIED": 0, + "SPIFFE": 1, + "CONNECT_TO_GOOGLE": 2, + "RESERVED_CUSTOM_VERIFICATION_MODE_3": 3, + "RESERVED_CUSTOM_VERIFICATION_MODE_4": 4, + "RESERVED_CUSTOM_VERIFICATION_MODE_5": 5, } ) @@ -454,7 +465,7 @@ type AuthenticationMechanism struct { // mechanism. Otherwise, S2A assumes that the authentication mechanism is // associated with the default identity. If the default identity cannot be // determined, the request is rejected. 
- Identity *common_go_proto1.Identity `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` + Identity *common_go_proto.Identity `protobuf:"bytes,3,opt,name=identity,proto3" json:"identity,omitempty"` // Types that are assignable to MechanismOneof: // // *AuthenticationMechanism_Token @@ -493,7 +504,7 @@ func (*AuthenticationMechanism) Descriptor() ([]byte, []int) { return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{1} } -func (x *AuthenticationMechanism) GetIdentity() *common_go_proto1.Identity { +func (x *AuthenticationMechanism) GetIdentity() *common_go_proto.Identity { if x != nil { return x.Identity } @@ -1185,7 +1196,7 @@ type SessionReq struct { // identity is not populated, S2A will try to deduce the managed identity to // use from the SNI extension. If that also fails, S2A uses the default // identity (if one exists). - LocalIdentity *common_go_proto1.Identity `protobuf:"bytes,1,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + LocalIdentity *common_go_proto.Identity `protobuf:"bytes,7,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` // The authentication mechanisms that the application wishes to use to // authenticate to S2A, ordered by preference. S2A will always use the first // authentication mechanism that matches the managed identity. @@ -1231,7 +1242,7 @@ func (*SessionReq) Descriptor() ([]byte, []int) { return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{11} } -func (x *SessionReq) GetLocalIdentity() *common_go_proto1.Identity { +func (x *SessionReq) GetLocalIdentity() *common_go_proto.Identity { if x != nil { return x.LocalIdentity } @@ -1790,358 +1801,365 @@ var file_internal_proto_v2_s2a_s2a_proto_rawDesc = []byte{ 0x0a, 0x1f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x1a, - 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2f, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, - 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, - 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x01, 0x0a, 0x0a, - 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x36, 0x0a, 0x17, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x65, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x41, 0x6c, 0x70, 0x6e, 0x4e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0e, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, - 0x6f, 0x74, 
0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x22, 0x75, 0x0a, 0x17, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, - 0x12, 0x2f, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x12, 0x16, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x63, - 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x36, 0x0a, 0x06, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, - 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, - 0x61, 0x69, 0x6c, 0x73, 0x22, 0x71, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x45, - 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x69, 0x64, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x69, 0x64, 0x65, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x6e, 0x69, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x73, 0x6e, 0x69, 0x22, 0xf1, 0x0b, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x54, - 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x12, 0x78, 0x0a, 0x18, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x6c, - 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x43, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x16, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x78, 0x0a, - 0x18, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, - 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, - 0x16, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x1a, 0xcf, 0x02, 0x0a, 0x16, 0x43, 0x6c, 0x69, 0x65, + 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x01, 0x0a, 0x0a, 0x41, 0x6c, 0x70, 0x6e, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x36, 0x0a, 0x17, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, + 0x6c, 0x70, 0x6e, 0x4e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, + 0x0a, 0x0e, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x52, 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x73, 0x22, 0x7e, 0x0a, 0x17, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x12, 0x32, 0x0a, 0x08, + 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x12, 0x16, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x63, 0x68, + 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x4a, 0x04, 0x08, 0x01, 0x10, + 0x02, 0x22, 0x36, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, + 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x71, 0x0a, 0x16, 0x47, 0x65, 0x74, + 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x12, 0x45, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x73, 0x69, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x73, + 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x6e, + 0x69, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x6e, 0x69, 0x22, 0xf1, 0x0b, 0x0a, + 0x17, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x78, 0x0a, 0x18, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x16, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x63, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, - 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, - 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, - 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, - 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x52, 0x0a, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08, - 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x1a, 0xfa, 0x06, 0x0a, 0x16, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, - 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 
0x2e, 0x73, - 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, - 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x73, 0x32, - 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, - 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, - 0x69, 0x74, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x74, 0x6c, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x75, - 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x74, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x93, 0x01, 0x0a, 0x1a, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x55, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, - 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x12, 0x3c, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x68, 0x65, 0x61, 0x64, - 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x61, 0x65, 0x61, 0x64, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x17, 0x6d, 0x61, 0x78, 0x4f, 0x76, 0x65, 0x72, 0x68, 0x65, - 0x61, 0x64, 0x4f, 0x66, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x65, 0x61, 0x64, 0x12, 0x39, - 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0b, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0a, 0x61, - 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x9e, 0x02, 0x0a, 0x18, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, - 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x23, 0x0a, 0x1f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, - 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, - 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x10, 0x01, 0x12, 0x2e, 0x0a, 0x2a, - 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, - 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, - 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x02, 0x12, 0x29, 0x0a, 0x25, - 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, - 0x45, 0x52, 
0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x56, - 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x03, 0x12, 0x3a, 0x0a, 0x36, 0x52, 0x45, 0x51, 0x55, 0x45, - 0x53, 0x54, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, 0x43, - 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, - 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, - 0x59, 0x10, 0x04, 0x12, 0x35, 0x0a, 0x31, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x41, - 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, - 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x41, 0x4e, - 0x44, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, - 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x42, 0x13, 0x0a, 0x11, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb0, 0x03, 0x0a, 0x1d, - 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, - 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x5d, 0x0a, - 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, - 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, - 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x50, 0x72, + 0x6f, 0x6e, 0x12, 0x78, 0x0a, 0x18, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x74, 0x6c, 0x73, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x16, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xcf, 0x02, 0x0a, + 0x16, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, + 0x68, 0x61, 0x69, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, + 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, + 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 
0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, + 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, + 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, + 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, + 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, + 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, + 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0a, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x1a, 0xfa, + 0x06, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, + 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, + 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, + 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, + 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, + 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, + 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, + 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0e, + 0x32, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, + 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, + 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x74, 0x6c, 0x73, + 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x74, 0x6c, 0x73, 0x52, 0x65, + 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, + 0x93, 0x01, 0x0a, 0x1a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x55, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 
0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x18, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x76, 0x65, + 0x72, 0x68, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, + 0x61, 0x65, 0x61, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x17, 0x6d, 0x61, 0x78, 0x4f, + 0x76, 0x65, 0x72, 0x68, 0x65, 0x61, 0x64, 0x4f, 0x66, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x41, + 0x65, 0x61, 0x64, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x52, 0x0a, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x9e, + 0x02, 0x0a, 0x18, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, + 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x23, 0x0a, 0x1f, + 0x44, 0x4f, 0x4e, 0x54, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, + 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x10, + 0x01, 0x12, 0x2e, 0x0a, 0x2a, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, + 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, + 0x42, 0x55, 0x54, 0x5f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, + 0x02, 0x12, 0x29, 0x0a, 0x25, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, + 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, + 0x41, 0x4e, 0x44, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x03, 0x12, 0x3a, 0x0a, 0x36, + 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, + 0x49, 0x52, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, + 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, + 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x04, 0x12, 0x35, 0x0a, 0x31, 0x52, 0x45, 0x51, 0x55, + 0x45, 0x53, 0x54, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, + 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, + 0x54, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x05, 0x4a, + 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x42, 0x13, 0x0a, 0x11, 0x74, + 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0xb0, 0x03, 0x0a, 0x1d, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, + 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x12, 0x5d, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, + 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x52, + 0x65, 0x71, 0x2e, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x51, 0x0a, 0x13, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x61, + 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, + 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, + 0x52, 0x12, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, + 0x69, 0x74, 0x68, 0x6d, 0x12, 0x1d, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x08, 0x72, 0x61, 0x77, 0x42, 0x79, + 0x74, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x64, 0x69, + 0x67, 0x65, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, + 0x61, 0x32, 0x35, 0x36, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, 0x73, 0x68, + 0x61, 0x33, 0x38, 0x34, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x44, 0x69, 0x67, 0x65, 0x73, + 0x74, 0x12, 0x25, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x35, 0x31, 0x32, 0x5f, 0x64, 0x69, 0x67, 0x65, + 0x73, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x35, + 0x31, 0x32, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, 0x3d, 0x0a, 0x13, 0x50, 0x72, 0x69, 0x76, + 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x08, 0x0a, 0x04, 0x53, 0x49, 0x47, 0x4e, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, + 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x02, 0x42, 0x0a, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x22, 0x3d, 0x0a, 0x1e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x51, 0x0a, 0x13, - 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, - 0x74, 0x68, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x73, 0x32, 0x61, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, 0x12, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, - 0x1d, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0c, 0x48, 0x00, 0x52, 0x08, 0x72, 0x61, 0x77, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x25, - 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x44, - 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x5f, - 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, - 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, - 0x73, 0x68, 
0x61, 0x35, 0x31, 0x32, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x35, 0x31, 0x32, 0x44, 0x69, 0x67, - 0x65, 0x73, 0x74, 0x22, 0x3d, 0x0a, 0x13, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, - 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, - 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x53, - 0x49, 0x47, 0x4e, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, - 0x10, 0x02, 0x42, 0x0a, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0x3d, - 0x0a, 0x1e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, - 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xe7, 0x01, - 0x0a, 0x20, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x12, 0x63, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x45, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, - 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, - 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, - 0x65, 0x73, 0x22, 0x43, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, - 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, - 0x07, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, - 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x02, 0x22, 0x40, 0x0a, 0x21, 0x4f, 0x66, 0x66, 0x6c, 0x6f, - 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, - 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xf8, 0x04, 0x0a, 0x1f, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x52, 0x0a, - 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x73, 0x32, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x22, 0xe7, 0x01, 0x0a, 0x20, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, + 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 
0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x63, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x45, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, + 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, + 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, + 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x43, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x75, 0x6d, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x01, 0x12, + 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x02, 0x22, 0x40, 0x0a, 0x21, + 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xf4, + 0x05, 0x0a, 0x1f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, + 0x65, 0x71, 0x12, 0x52, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x3e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, + 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, + 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x5b, 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, - 0x65, 0x12, 0x5b, 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x65, 0x65, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, - 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, - 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, - 0x48, 0x00, 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 
0x65, 0x65, 0x72, 0x12, 0x5b, - 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, - 0x65, 0x71, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x48, 0x00, 0x52, - 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x39, 0x0a, 0x0a, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0c, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x1a, 0xb5, 0x01, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, - 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, - 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x68, 0x6f, 0x73, - 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x51, 0x0a, 0x25, 0x73, - 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x75, 0x6e, 0x72, 0x65, 0x73, 0x74, - 0x72, 0x69, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x73, 0x65, 0x72, 0x69, - 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x6e, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, - 0x65, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x46, - 0x0a, 0x10, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, - 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, - 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x10, 0x01, 0x12, - 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x5f, 0x54, 0x4f, 0x5f, 0x47, 0x4f, - 0x4f, 0x47, 0x4c, 0x45, 0x10, 0x02, 0x42, 0x0c, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6f, - 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xb2, 0x02, 0x0a, 0x20, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x6c, 0x0a, 0x11, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, - 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69, 
0x64, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, - 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x32, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, - 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, - 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x3d, 0x0a, 0x10, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x0f, - 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, - 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, - 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x02, 0x22, 0x97, 0x05, 0x0a, 0x0a, 0x53, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, - 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x12, 0x62, 0x0a, 0x19, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x52, 0x18, - 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, - 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x61, 0x0a, 0x19, 0x67, 0x65, 0x74, 0x5f, - 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x73, 0x32, - 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, - 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x71, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x77, 0x0a, 0x21, 0x6f, - 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, - 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, - 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1d, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, - 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x71, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, - 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, - 0x6f, 0x70, 0x65, 
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x20, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, + 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x50, 0x65, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, + 0x65, 0x65, 0x72, 0x12, 0x5b, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x65, + 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, + 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, + 0x65, 0x72, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, + 0x1a, 0x39, 0x0a, 0x0a, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, + 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, + 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x1a, 0xb5, 0x01, 0x0a, 0x0a, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x51, 0x0a, 0x25, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x75, + 0x6e, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x22, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x6e, 0x72, 0x65, 0x73, + 0x74, 0x72, 0x69, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x22, 0xc1, 0x01, 0x0a, 0x10, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x50, 0x49, + 0x46, 0x46, 0x45, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, + 0x5f, 0x54, 0x4f, 0x5f, 0x47, 0x4f, 0x4f, 0x47, 0x4c, 0x45, 0x10, 0x02, 0x12, 0x27, 0x0a, 0x23, + 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x45, 0x44, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, + 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, 0x44, + 0x45, 0x5f, 0x33, 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x45, + 0x44, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 
0x4d, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x43, + 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x34, 0x10, 0x04, 0x12, 0x27, + 0x0a, 0x23, 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x45, 0x44, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, + 0x4d, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, + 0x4f, 0x44, 0x45, 0x5f, 0x35, 0x10, 0x05, 0x42, 0x0c, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, + 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xb2, 0x02, 0x0a, 0x20, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x6c, 0x0a, 0x11, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, + 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x32, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x3d, 0x0a, 0x10, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, + 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0b, 0x0a, + 0x07, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x02, 0x22, 0xa0, 0x05, 0x0a, 0x0a, 0x53, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x3d, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, + 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x62, 0x0a, 0x19, 0x61, 0x75, 0x74, 0x68, + 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, + 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, + 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, + 0x73, 0x6d, 0x52, 0x18, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x61, 0x0a, 0x19, + 0x67, 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, + 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, + 0x77, 0x0a, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, + 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x72, 0x65, 0x71, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, + 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1d, 0x6f, 0x66, 0x66, 0x6c, 0x6f, + 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x6f, 0x66, 0x66, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, + 0x71, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x7d, 0x0a, 0x23, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, - 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, - 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, - 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, - 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, 0x6e, - 0x65, 0x6f, 0x66, 0x22, 0xb4, 0x04, 0x0a, 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x64, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 
0x17, - 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x7a, 0x0a, 0x22, 0x6f, 0x66, 0x66, 0x6c, 0x6f, - 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, - 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, - 0x70, 0x48, 0x00, 0x52, 0x1e, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, - 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x12, 0x83, 0x01, 0x0a, 0x25, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, - 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, - 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, - 0x73, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x20, 0x6f, 0x66, 0x66, 0x6c, 0x6f, + 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x7d, 0x0a, 0x23, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, + 0x65, 0x71, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, - 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x20, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x42, 0x0c, 0x0a, 0x0a, - 0x72, 0x65, 0x73, 0x70, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0xa2, 0x03, 0x0a, 0x12, 0x53, - 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, - 0x6d, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, - 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x68, 0x61, 0x69, 
0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1f, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, + 0x71, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0xb4, 0x04, + 0x0a, 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x2c, 0x0a, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x64, 0x0a, 0x1a, 0x67, + 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, + 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x17, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x12, 0x7a, 0x0a, 0x22, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, + 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, + 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x1e, 0x6f, + 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, + 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x83, 0x01, + 0x0a, 0x25, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, + 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, + 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, + 0x52, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, + 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x48, 0x00, 0x52, 0x20, 0x76, 
0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, + 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x42, 0x0c, 0x0a, 0x0a, 0x72, 0x65, 0x73, 0x70, 0x5f, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0xa2, 0x03, 0x0a, 0x12, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x1c, 0x0a, 0x18, 0x53, + 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, + 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, + 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x21, 0x0a, 0x1d, + 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, + 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, - 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, - 0x10, 0x01, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, - 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, - 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, - 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, - 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, - 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, - 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, - 0x04, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, - 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x33, 0x38, 0x34, 0x52, - 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, - 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, - 0x5f, 0x53, 0x45, 0x43, 0x50, 0x35, 0x32, 0x31, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, - 0x32, 0x10, 0x06, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, + 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, + 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, + 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, + 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x27, 0x0a, 0x23, 0x53, + 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, + 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x33, 0x38, 0x34, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, + 0x38, 0x34, 0x10, 0x05, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, + 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x35, + 0x32, 0x31, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x06, 0x12, 0x24, 0x0a, + 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, + 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 
0x53, 0x48, 0x41, 0x32, 0x35, + 0x36, 0x10, 0x07, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, - 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x07, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, + 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x08, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, - 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x08, 0x12, - 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, - 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, - 0x35, 0x31, 0x32, 0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, - 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x0a, 0x32, - 0x57, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, - 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x2e, - 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x09, 0x12, + 0x18, 0x0a, 0x14, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, + 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x0a, 0x32, 0x57, 0x0a, 0x0a, 0x53, 0x32, 0x41, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, + 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, - 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x71, 0x1a, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, + 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, + 0x30, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, + 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -2158,7 +2176,7 @@ func file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP() []byte { var file_internal_proto_v2_s2a_s2a_proto_enumTypes = make([]protoimpl.EnumInfo, 6) var file_internal_proto_v2_s2a_s2a_proto_msgTypes = make([]protoimpl.MessageInfo, 17) -var file_internal_proto_v2_s2a_s2a_proto_goTypes = []interface{}{ +var file_internal_proto_v2_s2a_s2a_proto_goTypes = []any{ (SignatureAlgorithm)(0), // 0: s2a.proto.v2.SignatureAlgorithm 
(GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate)(0), // 1: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.RequestClientCertificate (OffloadPrivateKeyOperationReq_PrivateKeyOperation)(0), // 2: s2a.proto.v2.OffloadPrivateKeyOperationReq.PrivateKeyOperation @@ -2183,7 +2201,7 @@ var file_internal_proto_v2_s2a_s2a_proto_goTypes = []interface{}{ (*ValidatePeerCertificateChainReq_ClientPeer)(nil), // 21: s2a.proto.v2.ValidatePeerCertificateChainReq.ClientPeer (*ValidatePeerCertificateChainReq_ServerPeer)(nil), // 22: s2a.proto.v2.ValidatePeerCertificateChainReq.ServerPeer (common_go_proto.AlpnProtocol)(0), // 23: s2a.proto.v2.AlpnProtocol - (*common_go_proto1.Identity)(nil), // 24: s2a.proto.Identity + (*common_go_proto.Identity)(nil), // 24: s2a.proto.v2.Identity (common_go_proto.ConnectionSide)(0), // 25: s2a.proto.v2.ConnectionSide (*s2a_context_go_proto.S2AContext)(nil), // 26: s2a.proto.v2.S2AContext (common_go_proto.TLSVersion)(0), // 27: s2a.proto.v2.TLSVersion @@ -2191,7 +2209,7 @@ var file_internal_proto_v2_s2a_s2a_proto_goTypes = []interface{}{ } var file_internal_proto_v2_s2a_s2a_proto_depIdxs = []int32{ 23, // 0: s2a.proto.v2.AlpnPolicy.alpn_protocols:type_name -> s2a.proto.v2.AlpnProtocol - 24, // 1: s2a.proto.v2.AuthenticationMechanism.identity:type_name -> s2a.proto.Identity + 24, // 1: s2a.proto.v2.AuthenticationMechanism.identity:type_name -> s2a.proto.v2.Identity 25, // 2: s2a.proto.v2.GetTlsConfigurationReq.connection_side:type_name -> s2a.proto.v2.ConnectionSide 19, // 3: s2a.proto.v2.GetTlsConfigurationResp.client_tls_configuration:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration 20, // 4: s2a.proto.v2.GetTlsConfigurationResp.server_tls_configuration:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration @@ -2203,7 +2221,7 @@ var file_internal_proto_v2_s2a_s2a_proto_depIdxs = []int32{ 22, // 10: s2a.proto.v2.ValidatePeerCertificateChainReq.server_peer:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq.ServerPeer 5, // 11: s2a.proto.v2.ValidatePeerCertificateChainResp.validation_result:type_name -> s2a.proto.v2.ValidatePeerCertificateChainResp.ValidationResult 26, // 12: s2a.proto.v2.ValidatePeerCertificateChainResp.context:type_name -> s2a.proto.v2.S2AContext - 24, // 13: s2a.proto.v2.SessionReq.local_identity:type_name -> s2a.proto.Identity + 24, // 13: s2a.proto.v2.SessionReq.local_identity:type_name -> s2a.proto.v2.Identity 7, // 14: s2a.proto.v2.SessionReq.authentication_mechanisms:type_name -> s2a.proto.v2.AuthenticationMechanism 9, // 15: s2a.proto.v2.SessionReq.get_tls_configuration_req:type_name -> s2a.proto.v2.GetTlsConfigurationReq 11, // 16: s2a.proto.v2.SessionReq.offload_private_key_operation_req:type_name -> s2a.proto.v2.OffloadPrivateKeyOperationReq @@ -2238,7 +2256,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_internal_proto_v2_s2a_s2a_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*AlpnPolicy); i { case 0: return &v.state @@ -2250,7 +2268,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*AuthenticationMechanism); i { case 0: return &v.state @@ -2262,7 +2280,7 @@ func 
file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*Status); i { case 0: return &v.state @@ -2274,7 +2292,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*GetTlsConfigurationReq); i { case 0: return &v.state @@ -2286,7 +2304,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*GetTlsConfigurationResp); i { case 0: return &v.state @@ -2298,7 +2316,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*OffloadPrivateKeyOperationReq); i { case 0: return &v.state @@ -2310,7 +2328,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*OffloadPrivateKeyOperationResp); i { case 0: return &v.state @@ -2322,7 +2340,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*OffloadResumptionKeyOperationReq); i { case 0: return &v.state @@ -2334,7 +2352,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*OffloadResumptionKeyOperationResp); i { case 0: return &v.state @@ -2346,7 +2364,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*ValidatePeerCertificateChainReq); i { case 0: return &v.state @@ -2358,7 +2376,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*ValidatePeerCertificateChainResp); i { case 0: return &v.state @@ -2370,7 +2388,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*SessionReq); i { case 0: return &v.state @@ -2382,7 +2400,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return 
nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*SessionResp); i { case 0: return &v.state @@ -2394,7 +2412,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*GetTlsConfigurationResp_ClientTlsConfiguration); i { case 0: return &v.state @@ -2406,7 +2424,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[14].Exporter = func(v any, i int) any { switch v := v.(*GetTlsConfigurationResp_ServerTlsConfiguration); i { case 0: return &v.state @@ -2418,7 +2436,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v := v.(*ValidatePeerCertificateChainReq_ClientPeer); i { case 0: return &v.state @@ -2430,7 +2448,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[16].Exporter = func(v any, i int) any { switch v := v.(*ValidatePeerCertificateChainReq_ServerPeer); i { case 0: return &v.state @@ -2443,30 +2461,30 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { } } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].OneofWrappers = []interface{}{ + file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].OneofWrappers = []any{ (*AuthenticationMechanism_Token)(nil), } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].OneofWrappers = []interface{}{ + file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].OneofWrappers = []any{ (*GetTlsConfigurationResp_ClientTlsConfiguration_)(nil), (*GetTlsConfigurationResp_ServerTlsConfiguration_)(nil), } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].OneofWrappers = []interface{}{ + file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].OneofWrappers = []any{ (*OffloadPrivateKeyOperationReq_RawBytes)(nil), (*OffloadPrivateKeyOperationReq_Sha256Digest)(nil), (*OffloadPrivateKeyOperationReq_Sha384Digest)(nil), (*OffloadPrivateKeyOperationReq_Sha512Digest)(nil), } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].OneofWrappers = []interface{}{ + file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].OneofWrappers = []any{ (*ValidatePeerCertificateChainReq_ClientPeer_)(nil), (*ValidatePeerCertificateChainReq_ServerPeer_)(nil), } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].OneofWrappers = []interface{}{ + file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].OneofWrappers = []any{ (*SessionReq_GetTlsConfigurationReq)(nil), (*SessionReq_OffloadPrivateKeyOperationReq)(nil), (*SessionReq_OffloadResumptionKeyOperationReq)(nil), (*SessionReq_ValidatePeerCertificateChainReq)(nil), } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].OneofWrappers = []interface{}{ + file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].OneofWrappers = []any{ (*SessionResp_GetTlsConfigurationResp)(nil), (*SessionResp_OffloadPrivateKeyOperationResp)(nil), 
(*SessionResp_OffloadResumptionKeyOperationResp)(nil), diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go index 2566df6c304..c93f75a78b0 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 +// - protoc-gen-go-grpc v1.4.0 // - protoc v3.21.12 // source: internal/proto/v2/s2a/s2a.proto @@ -29,8 +29,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.62.0 or later. +const _ = grpc.SupportPackageIsVersion8 const ( S2AService_SetUpSession_FullMethodName = "/s2a.proto.v2.S2AService/SetUpSession" @@ -54,11 +54,12 @@ func NewS2AServiceClient(cc grpc.ClientConnInterface) S2AServiceClient { } func (c *s2AServiceClient) SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) { - stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, opts...) + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &s2AServiceSetUpSessionClient{stream} + x := &s2AServiceSetUpSessionClient{ClientStream: stream} return x, nil } @@ -115,7 +116,7 @@ func RegisterS2AServiceServer(s grpc.ServiceRegistrar, srv S2AServiceServer) { } func _S2AService_SetUpSession_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{stream}) + return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{ServerStream: stream}) } type S2AService_SetUpSessionServer interface { diff --git a/vendor/github.com/google/s2a-go/internal/record/record.go b/vendor/github.com/google/s2a-go/internal/record/record.go index c60515510a7..e76509ef01a 100644 --- a/vendor/github.com/google/s2a-go/internal/record/record.go +++ b/vendor/github.com/google/s2a-go/internal/record/record.go @@ -378,11 +378,6 @@ func (p *conn) Read(b []byte) (n int, err error) { if len(p.handshakeBuf) > 0 { return 0, errors.New("application data received while processing fragmented handshake messages") } - if p.ticketState == receivingTickets { - p.ticketState = notReceivingTickets - grpclog.Infof("Sending session tickets to S2A.") - p.ticketSender.sendTicketsToS2A(p.sessionTickets, p.callComplete) - } case alert: return 0, p.handleAlertMessage() case handshake: @@ -500,17 +495,7 @@ func (p *conn) buildRecord(plaintext []byte, recordType byte, recordStartIndex i } func (p *conn) Close() error { - p.readMutex.Lock() - defer p.readMutex.Unlock() - p.writeMutex.Lock() - defer p.writeMutex.Unlock() - // If p.ticketState is equal to notReceivingTickets, then S2A has - // been sent a flight of session tickets, and we must wait for the - // call to S2A to complete before closing the record protocol. 
- if p.ticketState == notReceivingTickets { - <-p.callComplete - grpclog.Infof("Safe to close the connection because sending tickets to S2A is (already) complete.") - } + // Close the connection immediately. return p.Conn.Close() } @@ -663,7 +648,7 @@ func (p *conn) handleHandshakeMessage() error { // Several handshake messages may be coalesced into a single record. // Continue reading them until the handshake buffer is empty. for len(p.handshakeBuf) > 0 { - handshakeMsgType, msgLen, msg, rawMsg, ok := p.parseHandshakeMsg() + handshakeMsgType, msgLen, msg, _, ok := p.parseHandshakeMsg() if !ok { // The handshake could not be fully parsed, so read in another // record and try again later. @@ -681,20 +666,7 @@ func (p *conn) handleHandshakeMessage() error { return err } case tlsHandshakeNewSessionTicketType: - // Ignore tickets that are received after a batch of tickets has - // been sent to S2A. - if p.ticketState == notReceivingTickets { - continue - } - if p.ticketState == ticketsNotYetReceived { - p.ticketState = receivingTickets - } - p.sessionTickets = append(p.sessionTickets, rawMsg) - if len(p.sessionTickets) == maxAllowedTickets { - p.ticketState = notReceivingTickets - grpclog.Infof("Sending session tickets to S2A.") - p.ticketSender.sendTicketsToS2A(p.sessionTickets, p.callComplete) - } + // Do nothing for session ticket. default: return errors.New("unknown handshake message type") } diff --git a/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go b/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go index ec96ba3b6a6..4057e70c8ad 100644 --- a/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go +++ b/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go @@ -23,7 +23,8 @@ import ( "fmt" "os" - commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" + commonpbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" + commonpb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" ) const ( @@ -37,7 +38,7 @@ type AccessTokenManager interface { DefaultToken() (token string, err error) // Token returns a token that an application with local identity equal to // identity must use to authenticate to S2A. - Token(identity *commonpb.Identity) (token string, err error) + Token(identity interface{}) (token string, err error) } type singleTokenAccessTokenManager struct { @@ -65,6 +66,14 @@ func (m *singleTokenAccessTokenManager) DefaultToken() (string, error) { } // Token always returns the token managed by the singleTokenAccessTokenManager. -func (m *singleTokenAccessTokenManager) Token(*commonpb.Identity) (string, error) { +func (m *singleTokenAccessTokenManager) Token(identity interface{}) (string, error) { + switch v := identity.(type) { + case *commonpbv1.Identity: + // valid type. + case *commonpb.Identity: + // valid type. 
+ default: + return "", fmt.Errorf("Incorrect identity type: %v", v) + } return m.token, nil } diff --git a/vendor/github.com/google/s2a-go/internal/v2/s2av2.go b/vendor/github.com/google/s2a-go/internal/v2/s2av2.go index 85a8379d833..a6402ee48cc 100644 --- a/vendor/github.com/google/s2a-go/internal/v2/s2av2.go +++ b/vendor/github.com/google/s2a-go/internal/v2/s2av2.go @@ -28,7 +28,6 @@ import ( "os" "time" - "github.com/golang/protobuf/proto" "github.com/google/s2a-go/fallback" "github.com/google/s2a-go/internal/handshaker/service" "github.com/google/s2a-go/internal/tokenmanager" @@ -38,8 +37,9 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" + "google.golang.org/protobuf/proto" - commonpbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" + commonpb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" ) @@ -59,9 +59,9 @@ type s2av2TransportCreds struct { transportCreds credentials.TransportCredentials tokenManager *tokenmanager.AccessTokenManager // localIdentity should only be used by the client. - localIdentity *commonpbv1.Identity + localIdentity *commonpb.Identity // localIdentities should only be used by the server. - localIdentities []*commonpbv1.Identity + localIdentities []*commonpb.Identity verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode fallbackClientHandshake fallback.ClientHandshake getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) @@ -70,7 +70,7 @@ type s2av2TransportCreds struct { // NewClientCreds returns a client-side transport credentials object that uses // the S2Av2 to establish a secure connection with a server. -func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentity *commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, fallbackClientHandshakeFunc fallback.ClientHandshake, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error), serverAuthorizationPolicy []byte) (credentials.TransportCredentials, error) { +func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentity *commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, fallbackClientHandshakeFunc fallback.ClientHandshake, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error), serverAuthorizationPolicy []byte) (credentials.TransportCredentials, error) { // Create an AccessTokenManager instance to use to authenticate to S2Av2. accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() @@ -101,7 +101,7 @@ func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCre // NewServerCreds returns a server-side transport credentials object that uses // the S2Av2 to establish a secure connection with a client. 
-func NewServerCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (credentials.TransportCredentials, error) { +func NewServerCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentities []*commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (credentials.TransportCredentials, error) { // Create an AccessTokenManager instance to use to authenticate to S2Av2. accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() creds := &s2av2TransportCreds{ @@ -183,13 +183,7 @@ func (c *s2av2TransportCreds) ClientHandshake(ctx context.Context, serverAuthori } creds := credentials.NewTLS(config) - var conn net.Conn - var authInfo credentials.AuthInfo - retry.Run(timeoutCtx, - func() error { - conn, authInfo, err = creds.ClientHandshake(timeoutCtx, serverName, rawConn) - return err - }) + conn, authInfo, err := creds.ClientHandshake(timeoutCtx, serverName, rawConn) if err != nil { grpclog.Infof("Failed to do client handshake using S2Av2: %v", err) if c.fallbackClientHandshake != nil { @@ -197,7 +191,7 @@ func (c *s2av2TransportCreds) ClientHandshake(ctx context.Context, serverAuthori } return nil, nil, err } - grpclog.Infof("Successfully done client handshake using S2Av2 to: %s", serverName) + grpclog.Infof("client-side handshake is done using S2Av2 to: %s", serverName) return conn, authInfo, err } @@ -247,13 +241,7 @@ func (c *s2av2TransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, crede } creds := credentials.NewTLS(config) - var conn net.Conn - var authInfo credentials.AuthInfo - retry.Run(ctx, - func() error { - conn, authInfo, err = creds.ServerHandshake(rawConn) - return err - }) + conn, authInfo, err := creds.ServerHandshake(rawConn) if err != nil { grpclog.Infof("Failed to do server handshake using S2Av2: %v", err) return nil, nil, err @@ -280,15 +268,15 @@ func (c *s2av2TransportCreds) Clone() credentials.TransportCredentials { tokenManager = *c.tokenManager } verificationMode := c.verificationMode - var localIdentity *commonpbv1.Identity + var localIdentity *commonpb.Identity if c.localIdentity != nil { - localIdentity = proto.Clone(c.localIdentity).(*commonpbv1.Identity) + localIdentity = proto.Clone(c.localIdentity).(*commonpb.Identity) } - var localIdentities []*commonpbv1.Identity + var localIdentities []*commonpb.Identity if c.localIdentities != nil { - localIdentities = make([]*commonpbv1.Identity, len(c.localIdentities)) + localIdentities = make([]*commonpb.Identity, len(c.localIdentities)) for i, localIdentity := range c.localIdentities { - localIdentities[i] = proto.Clone(localIdentity).(*commonpbv1.Identity) + localIdentities[i] = proto.Clone(localIdentity).(*commonpb.Identity) } } creds := &s2av2TransportCreds{ diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go index 4d919132295..fa0002e36b7 100644 --- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go +++ b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go @@ -33,7 +33,6 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" - commonpbv1 
"github.com/google/s2a-go/internal/proto/common_go_proto" commonpb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" ) @@ -44,8 +43,8 @@ const ( ) // GetTLSConfigurationForClient returns a tls.Config instance for use by a client application. -func GetTLSConfigurationForClient(serverHostname string, s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentity *commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, serverAuthorizationPolicy []byte) (*tls.Config, error) { - authMechanisms := getAuthMechanisms(tokenManager, []*commonpbv1.Identity{localIdentity}) +func GetTLSConfigurationForClient(serverHostname string, s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentity *commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, serverAuthorizationPolicy []byte) (*tls.Config, error) { + authMechanisms := getAuthMechanisms(tokenManager, []*commonpb.Identity{localIdentity}) if grpclog.V(1) { grpclog.Infof("Sending request to S2Av2 for client TLS config.") @@ -126,7 +125,7 @@ func GetTLSConfigurationForClient(serverHostname string, s2AStream stream.S2AStr } // GetTLSConfigurationForServer returns a tls.Config instance for use by a server application. -func GetTLSConfigurationForServer(s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode) (*tls.Config, error) { +func GetTLSConfigurationForServer(s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode) (*tls.Config, error) { return &tls.Config{ GetConfigForClient: ClientConfig(tokenManager, localIdentities, verificationMode, s2AStream), }, nil @@ -136,7 +135,7 @@ func GetTLSConfigurationForServer(s2AStream stream.S2AStream, tokenManager token // connection with a client, based on SNI communicated during ClientHello. // Ensures that server presents the correct certificate to establish a TLS // connection. 
-func ClientConfig(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream) func(chi *tls.ClientHelloInfo) (*tls.Config, error) { +func ClientConfig(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream) func(chi *tls.ClientHelloInfo) (*tls.Config, error) { return func(chi *tls.ClientHelloInfo) (*tls.Config, error) { tlsConfig, err := getServerConfigFromS2Av2(tokenManager, localIdentities, chi.ServerName, s2AStream) if err != nil { @@ -219,9 +218,9 @@ func getTLSCipherSuite(tlsCipherSuite commonpb.Ciphersuite) uint16 { } } -func getServerConfigFromS2Av2(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, sni string, s2AStream stream.S2AStream) (*s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration, error) { +func getServerConfigFromS2Av2(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpb.Identity, sni string, s2AStream stream.S2AStream) (*s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration, error) { authMechanisms := getAuthMechanisms(tokenManager, localIdentities) - var locID *commonpbv1.Identity + var locID *commonpb.Identity if localIdentities != nil { locID = localIdentities[0] } @@ -283,7 +282,7 @@ func getTLSClientAuthType(tlsConfig *s2av2pb.GetTlsConfigurationResp_ServerTlsCo return clientAuth } -func getAuthMechanisms(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity) []*s2av2pb.AuthenticationMechanism { +func getAuthMechanisms(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpb.Identity) []*s2av2pb.AuthenticationMechanism { if tokenManager == nil { return nil } diff --git a/vendor/github.com/google/s2a-go/s2a.go b/vendor/github.com/google/s2a-go/s2a.go index 5ecb06f930e..cc79bd09a67 100644 --- a/vendor/github.com/google/s2a-go/s2a.go +++ b/vendor/github.com/google/s2a-go/s2a.go @@ -29,7 +29,6 @@ import ( "sync" "time" - "github.com/golang/protobuf/proto" "github.com/google/s2a-go/fallback" "github.com/google/s2a-go/internal/handshaker" "github.com/google/s2a-go/internal/handshaker/service" @@ -38,8 +37,10 @@ import ( "github.com/google/s2a-go/retry" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" + "google.golang.org/protobuf/proto" - commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" + commonpbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" + commonpb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" ) @@ -54,17 +55,17 @@ const ( // credentials.TransportCredentials interface. type s2aTransportCreds struct { info *credentials.ProtocolInfo - minTLSVersion commonpb.TLSVersion - maxTLSVersion commonpb.TLSVersion + minTLSVersion commonpbv1.TLSVersion + maxTLSVersion commonpbv1.TLSVersion // tlsCiphersuites contains the ciphersuites used in the S2A connection. // Note that these are currently unconfigurable. - tlsCiphersuites []commonpb.Ciphersuite + tlsCiphersuites []commonpbv1.Ciphersuite // localIdentity should only be used by the client. - localIdentity *commonpb.Identity + localIdentity *commonpbv1.Identity // localIdentities should only be used by the server. 
- localIdentities []*commonpb.Identity + localIdentities []*commonpbv1.Identity // targetIdentities should only be used by the client. - targetIdentities []*commonpb.Identity + targetIdentities []*commonpbv1.Identity isClient bool s2aAddr string ensureProcessSessionTickets *sync.WaitGroup @@ -76,7 +77,7 @@ func NewClientCreds(opts *ClientOptions) (credentials.TransportCredentials, erro if opts == nil { return nil, errors.New("nil client options") } - var targetIdentities []*commonpb.Identity + var targetIdentities []*commonpbv1.Identity for _, targetIdentity := range opts.TargetIdentities { protoTargetIdentity, err := toProtoIdentity(targetIdentity) if err != nil { @@ -93,12 +94,12 @@ func NewClientCreds(opts *ClientOptions) (credentials.TransportCredentials, erro info: &credentials.ProtocolInfo{ SecurityProtocol: s2aSecurityProtocol, }, - minTLSVersion: commonpb.TLSVersion_TLS1_3, - maxTLSVersion: commonpb.TLSVersion_TLS1_3, - tlsCiphersuites: []commonpb.Ciphersuite{ - commonpb.Ciphersuite_AES_128_GCM_SHA256, - commonpb.Ciphersuite_AES_256_GCM_SHA384, - commonpb.Ciphersuite_CHACHA20_POLY1305_SHA256, + minTLSVersion: commonpbv1.TLSVersion_TLS1_3, + maxTLSVersion: commonpbv1.TLSVersion_TLS1_3, + tlsCiphersuites: []commonpbv1.Ciphersuite{ + commonpbv1.Ciphersuite_AES_128_GCM_SHA256, + commonpbv1.Ciphersuite_AES_256_GCM_SHA384, + commonpbv1.Ciphersuite_CHACHA20_POLY1305_SHA256, }, localIdentity: localIdentity, targetIdentities: targetIdentities, @@ -112,7 +113,11 @@ func NewClientCreds(opts *ClientOptions) (credentials.TransportCredentials, erro if opts.FallbackOpts != nil && opts.FallbackOpts.FallbackClientHandshakeFunc != nil { fallbackFunc = opts.FallbackOpts.FallbackClientHandshakeFunc } - return v2.NewClientCreds(opts.S2AAddress, opts.TransportCreds, localIdentity, verificationMode, fallbackFunc, opts.getS2AStream, opts.serverAuthorizationPolicy) + v2LocalIdentity, err := toV2ProtoIdentity(opts.LocalIdentity) + if err != nil { + return nil, err + } + return v2.NewClientCreds(opts.S2AAddress, opts.TransportCreds, v2LocalIdentity, verificationMode, fallbackFunc, opts.getS2AStream, opts.serverAuthorizationPolicy) } // NewServerCreds returns a server-side transport credentials object that uses @@ -121,7 +126,7 @@ func NewServerCreds(opts *ServerOptions) (credentials.TransportCredentials, erro if opts == nil { return nil, errors.New("nil server options") } - var localIdentities []*commonpb.Identity + var localIdentities []*commonpbv1.Identity for _, localIdentity := range opts.LocalIdentities { protoLocalIdentity, err := toProtoIdentity(localIdentity) if err != nil { @@ -134,12 +139,12 @@ func NewServerCreds(opts *ServerOptions) (credentials.TransportCredentials, erro info: &credentials.ProtocolInfo{ SecurityProtocol: s2aSecurityProtocol, }, - minTLSVersion: commonpb.TLSVersion_TLS1_3, - maxTLSVersion: commonpb.TLSVersion_TLS1_3, - tlsCiphersuites: []commonpb.Ciphersuite{ - commonpb.Ciphersuite_AES_128_GCM_SHA256, - commonpb.Ciphersuite_AES_256_GCM_SHA384, - commonpb.Ciphersuite_CHACHA20_POLY1305_SHA256, + minTLSVersion: commonpbv1.TLSVersion_TLS1_3, + maxTLSVersion: commonpbv1.TLSVersion_TLS1_3, + tlsCiphersuites: []commonpbv1.Ciphersuite{ + commonpbv1.Ciphersuite_AES_128_GCM_SHA256, + commonpbv1.Ciphersuite_AES_256_GCM_SHA384, + commonpbv1.Ciphersuite_CHACHA20_POLY1305_SHA256, }, localIdentities: localIdentities, isClient: false, @@ -147,7 +152,15 @@ func NewServerCreds(opts *ServerOptions) (credentials.TransportCredentials, erro }, nil } verificationMode := 
getVerificationMode(opts.VerificationMode) - return v2.NewServerCreds(opts.S2AAddress, opts.TransportCreds, localIdentities, verificationMode, opts.getS2AStream) + var v2LocalIdentities []*commonpb.Identity + for _, localIdentity := range opts.LocalIdentities { + protoLocalIdentity, err := toV2ProtoIdentity(localIdentity) + if err != nil { + return nil, err + } + v2LocalIdentities = append(v2LocalIdentities, protoLocalIdentity) + } + return v2.NewServerCreds(opts.S2AAddress, opts.TransportCreds, v2LocalIdentities, verificationMode, opts.getS2AStream) } // ClientHandshake initiates a client-side TLS handshake using the S2A. @@ -248,22 +261,22 @@ func (c *s2aTransportCreds) Info() credentials.ProtocolInfo { func (c *s2aTransportCreds) Clone() credentials.TransportCredentials { info := *c.info - var localIdentity *commonpb.Identity + var localIdentity *commonpbv1.Identity if c.localIdentity != nil { - localIdentity = proto.Clone(c.localIdentity).(*commonpb.Identity) + localIdentity = proto.Clone(c.localIdentity).(*commonpbv1.Identity) } - var localIdentities []*commonpb.Identity + var localIdentities []*commonpbv1.Identity if c.localIdentities != nil { - localIdentities = make([]*commonpb.Identity, len(c.localIdentities)) + localIdentities = make([]*commonpbv1.Identity, len(c.localIdentities)) for i, localIdentity := range c.localIdentities { - localIdentities[i] = proto.Clone(localIdentity).(*commonpb.Identity) + localIdentities[i] = proto.Clone(localIdentity).(*commonpbv1.Identity) } } - var targetIdentities []*commonpb.Identity + var targetIdentities []*commonpbv1.Identity if c.targetIdentities != nil { - targetIdentities = make([]*commonpb.Identity, len(c.targetIdentities)) + targetIdentities = make([]*commonpbv1.Identity, len(c.targetIdentities)) for i, targetIdentity := range c.targetIdentities { - targetIdentities[i] = proto.Clone(targetIdentity).(*commonpb.Identity) + targetIdentities[i] = proto.Clone(targetIdentity).(*commonpbv1.Identity) } } return &s2aTransportCreds{ @@ -351,6 +364,12 @@ func getVerificationMode(verificationMode VerificationModeType) s2av2pb.Validate return s2av2pb.ValidatePeerCertificateChainReq_CONNECT_TO_GOOGLE case Spiffe: return s2av2pb.ValidatePeerCertificateChainReq_SPIFFE + case ReservedCustomVerificationMode3: + return s2av2pb.ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_3 + case ReservedCustomVerificationMode4: + return s2av2pb.ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_4 + case ReservedCustomVerificationMode5: + return s2av2pb.ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_5 default: return s2av2pb.ValidatePeerCertificateChainReq_UNSPECIFIED } @@ -396,24 +415,20 @@ func NewS2ADialTLSContextFunc(opts *ClientOptions) func(ctx context.Context, net defer cancel() var s2aTLSConfig *tls.Config + var c net.Conn retry.Run(timeoutCtx, func() error { s2aTLSConfig, err = factory.Build(timeoutCtx, &TLSClientConfigOptions{ ServerName: serverName, }) - return err - }) - if err != nil { - grpclog.Infof("error building S2A TLS config: %v", err) - return fallback(err) - } + if err != nil { + grpclog.Infof("error building S2A TLS config: %v", err) + return err + } - s2aDialer := &tls.Dialer{ - Config: s2aTLSConfig, - } - var c net.Conn - retry.Run(timeoutCtx, - func() error { + s2aDialer := &tls.Dialer{ + Config: s2aTLSConfig, + } c, err = s2aDialer.DialContext(timeoutCtx, network, addr) return err }) diff --git a/vendor/github.com/google/s2a-go/s2a_options.go b/vendor/github.com/google/s2a-go/s2a_options.go 
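// Illustrative sketch only (not part of the patch): the public entry point for the
// options touched above is s2a.NewClientCreds(*ClientOptions), and the
// VerificationModeType constants in the s2a_options.go hunk that follows map onto
// the v2 proto values via the getVerificationMode switch above. The S2A address
// and target endpoint below are placeholders, not values from the source.
package main

import (
	"log"

	s2a "github.com/google/s2a-go"
	"google.golang.org/grpc"
)

func main() {
	creds, err := s2a.NewClientCreds(&s2a.ClientOptions{
		S2AAddress:       "s2a.example.internal:8080", // placeholder address
		VerificationMode: s2a.ConnectToGoogle,
	})
	if err != nil {
		log.Fatalf("creating S2A client credentials: %v", err)
	}
	conn, err := grpc.Dial("service.example.com:443", grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatalf("dialing: %v", err)
	}
	defer conn.Close()
}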
index fcdbc1621bd..5bbf31bf412 100644 --- a/vendor/github.com/google/s2a-go/s2a_options.go +++ b/vendor/github.com/google/s2a-go/s2a_options.go @@ -28,7 +28,8 @@ import ( "github.com/google/s2a-go/stream" "google.golang.org/grpc/credentials" - s2apb "github.com/google/s2a-go/internal/proto/common_go_proto" + s2apbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" + s2apb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" ) // Identity is the interface for S2A identities. @@ -76,9 +77,12 @@ type VerificationModeType int // Three types of verification modes. const ( - Unspecified = iota - ConnectToGoogle + Unspecified VerificationModeType = iota Spiffe + ConnectToGoogle + ReservedCustomVerificationMode3 + ReservedCustomVerificationMode4 + ReservedCustomVerificationMode5 ) // ClientOptions contains the client-side options used to establish a secure @@ -198,7 +202,23 @@ func DefaultServerOptions(s2aAddress string) *ServerOptions { } } -func toProtoIdentity(identity Identity) (*s2apb.Identity, error) { +func toProtoIdentity(identity Identity) (*s2apbv1.Identity, error) { + if identity == nil { + return nil, nil + } + switch id := identity.(type) { + case *spiffeID: + return &s2apbv1.Identity{IdentityOneof: &s2apbv1.Identity_SpiffeId{SpiffeId: id.Name()}}, nil + case *hostname: + return &s2apbv1.Identity{IdentityOneof: &s2apbv1.Identity_Hostname{Hostname: id.Name()}}, nil + case *uid: + return &s2apbv1.Identity{IdentityOneof: &s2apbv1.Identity_Uid{Uid: id.Name()}}, nil + default: + return nil, errors.New("unrecognized identity type") + } +} + +func toV2ProtoIdentity(identity Identity) (*s2apb.Identity, error) { if identity == nil { return nil, nil } diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json index d51736e7e36..44d4d00202f 100644 --- a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json +++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -1,3 +1,3 @@ { - "v2": "2.12.4" + "v2": "2.13.0" } diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md index 7e36eb48ff8..d63421b71ca 100644 --- a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md +++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -1,5 +1,19 @@ # Changelog +## [2.13.0](https://github.com/googleapis/gax-go/compare/v2.12.5...v2.13.0) (2024-07-22) + + +### Features + +* **iterator:** add package to help work with new iter.Seq types ([#358](https://github.com/googleapis/gax-go/issues/358)) ([6bccdaa](https://github.com/googleapis/gax-go/commit/6bccdaac011fe6fd147e4eb533a8e6520b7d4acc)) + +## [2.12.5](https://github.com/googleapis/gax-go/compare/v2.12.4...v2.12.5) (2024-06-18) + + +### Bug Fixes + +* **v2/apierror:** fix (*APIError).Error() for unwrapped Status ([#351](https://github.com/googleapis/gax-go/issues/351)) ([22c16e7](https://github.com/googleapis/gax-go/commit/22c16e7bff5402bdc4c25063771cdd01c650b500)), refs [#350](https://github.com/googleapis/gax-go/issues/350) + ## [2.12.4](https://github.com/googleapis/gax-go/compare/v2.12.3...v2.12.4) (2024-05-03) diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go index d785a065cab..7de60773d63 100644 --- a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go @@ -206,8 +206,10 @@ func (a *APIError) Error() 
string { // Truncate the googleapi.Error message because it dumps the Details in // an ugly way. msg = fmt.Sprintf("googleapi: Error %d: %s", a.httpErr.Code, a.httpErr.Message) - } else if a.status != nil { + } else if a.status != nil && a.err != nil { msg = a.err.Error() + } else if a.status != nil { + msg = a.status.Message() } return strings.TrimSpace(fmt.Sprintf("%s\n%s", msg, a.details)) } diff --git a/vendor/github.com/googleapis/gax-go/v2/header.go b/vendor/github.com/googleapis/gax-go/v2/header.go index 3e53729e5fc..f5273985afc 100644 --- a/vendor/github.com/googleapis/gax-go/v2/header.go +++ b/vendor/github.com/googleapis/gax-go/v2/header.go @@ -163,11 +163,38 @@ func insertMetadata(ctx context.Context, keyvals ...string) metadata.MD { out = metadata.MD(make(map[string][]string)) } headers := callctx.HeadersFromContext(ctx) - for k, v := range headers { - out[k] = append(out[k], v...) + + // x-goog-api-client is a special case that we want to make sure gets merged + // into a single header. + const xGoogHeader = "x-goog-api-client" + var mergedXgoogHeader strings.Builder + + for k, vals := range headers { + if k == xGoogHeader { + // Merge all values for the x-goog-api-client header set on the ctx. + for _, v := range vals { + mergedXgoogHeader.WriteString(v) + mergedXgoogHeader.WriteRune(' ') + } + continue + } + out[k] = append(out[k], vals...) } for i := 0; i < len(keyvals); i = i + 2 { out[keyvals[i]] = append(out[keyvals[i]], keyvals[i+1]) + + if keyvals[i] == xGoogHeader { + // Merge the x-goog-api-client header values set on the ctx with any + // values passed in for it from the client. + mergedXgoogHeader.WriteString(keyvals[i+1]) + mergedXgoogHeader.WriteRune(' ') + } + } + + // Add the x goog header back in, replacing the separate values that were set. + if mergedXgoogHeader.Len() > 0 { + out[xGoogHeader] = []string{mergedXgoogHeader.String()[:mergedXgoogHeader.Len()-1]} } + return out } diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go index 3006ad7bd91..e12421cf599 100644 --- a/vendor/github.com/googleapis/gax-go/v2/internal/version.go +++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -30,4 +30,4 @@ package internal // Version is the current tagged release of the library. 
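// Illustrative sketch only (not part of the patch): per-call headers reach the
// merging logic above through gax's callctx package. The SetHeaders signature is
// assumed from that package's exported API, and the x-goog-api-client values are
// made up; the patched insertMetadata now folds all such values into a single
// space-separated header entry.
package main

import (
	"context"
	"fmt"

	"github.com/googleapis/gax-go/v2/callctx"
)

func main() {
	ctx := context.Background()
	// Two separate x-goog-api-client values attached to the context...
	ctx = callctx.SetHeaders(ctx, "x-goog-api-client", "gl-go/1.22.0")
	ctx = callctx.SetHeaders(ctx, "x-goog-api-client", "gccl/1.2.3")

	// ...which insertMetadata later merges into one outgoing metadata value.
	for k, v := range callctx.HeadersFromContext(ctx) {
		fmt.Println(k, v)
	}
}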
-const Version = "2.12.4" +const Version = "2.13.0" diff --git a/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md b/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md index 0eec3e6dd6d..dc38bf58710 100644 --- a/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md +++ b/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md @@ -1,3 +1,25 @@ +## v1.14.0 (2024-07-24) + +* [GH-3095](https://github.com/gophercloud/gophercloud/pull/3095) [neutron]: introduce Description argument for the portforwarding +* [GH-3098](https://github.com/gophercloud/gophercloud/pull/3098) [neutron]: introduce Stateful argument for the security groups +* [GH-3099](https://github.com/gophercloud/gophercloud/pull/3099) [networking]: subnet add field dns_publish_fixed_ip + +## v1.13.0 (2024-07-08) + +* [GH-3044](https://github.com/gophercloud/gophercloud/pull/3044) [v1] Add ci jobs for openstack caracal +* [GH-3073](https://github.com/gophercloud/gophercloud/pull/3073) [v1] Adding missing QoS field for router +* [GH-3080](https://github.com/gophercloud/gophercloud/pull/3080) [networking]: add BGP VPNs support (backport to 1.x) + +## v1.12.0 (2024-05-27) + +* [GH-2979](https://github.com/gophercloud/gophercloud/pull/2979) [v1] CI backports +* [GH-2985](https://github.com/gophercloud/gophercloud/pull/2985) [v1] baremetal: fix handling of the "fields" query argument +* [GH-2989](https://github.com/gophercloud/gophercloud/pull/2989) [v1] [CI] Fix portbiding tests +* [GH-2992](https://github.com/gophercloud/gophercloud/pull/2992) [v1] [CI] Fix portbiding tests +* [GH-2993](https://github.com/gophercloud/gophercloud/pull/2993) [v1] build(deps): bump EmilienM/devstack-action from 0.14 to 0.15 +* [GH-2998](https://github.com/gophercloud/gophercloud/pull/2998) [v1] testhelper: mark all helpers with t.Helper +* [GH-3043](https://github.com/gophercloud/gophercloud/pull/3043) [v1] CI: remove Zed from testing coverage + ## v1.11.0 (2024-03-07) This version reverts the inclusion of Context in the v1 branch. This inclusion diff --git a/vendor/github.com/gophercloud/gophercloud/params.go b/vendor/github.com/gophercloud/gophercloud/params.go index 17b200cd239..5abc2c55899 100644 --- a/vendor/github.com/gophercloud/gophercloud/params.go +++ b/vendor/github.com/gophercloud/gophercloud/params.go @@ -318,8 +318,15 @@ converted into query parameters based on a "q" tag. For example: will be converted into "?x_bar=AAA&lorem_ipsum=BBB". -The struct's fields may be strings, integers, or boolean values. Fields left at -their type's zero value will be omitted from the query. +The struct's fields may be strings, integers, slices, or boolean values. Fields +left at their type's zero value will be omitted from the query. + +Slice are handled in one of two ways: + + type struct Something { + Bar []string `q:"bar"` // E.g. ?bar=1&bar=2 + Baz []int `q:"baz" format="comma-separated"` // E.g. 
?baz=1,2 + } */ func BuildQueryString(opts interface{}) (*url.URL, error) { optsValue := reflect.ValueOf(opts) @@ -358,16 +365,22 @@ func BuildQueryString(opts interface{}) (*url.URL, error) { case reflect.Bool: params.Add(tags[0], strconv.FormatBool(v.Bool())) case reflect.Slice: + var values []string switch v.Type().Elem() { case reflect.TypeOf(0): for i := 0; i < v.Len(); i++ { - params.Add(tags[0], strconv.FormatInt(v.Index(i).Int(), 10)) + values = append(values, strconv.FormatInt(v.Index(i).Int(), 10)) } default: for i := 0; i < v.Len(); i++ { - params.Add(tags[0], v.Index(i).String()) + values = append(values, v.Index(i).String()) } } + if sliceFormat := f.Tag.Get("format"); sliceFormat == "comma-separated" { + params.Add(tags[0], strings.Join(values, ",")) + } else { + params[tags[0]] = append(params[tags[0]], values...) + } case reflect.Map: if v.Type().Key().Kind() == reflect.String && v.Type().Elem().Kind() == reflect.String { var s []string diff --git a/vendor/github.com/gophercloud/gophercloud/provider_client.go b/vendor/github.com/gophercloud/gophercloud/provider_client.go index 1ff54b8197f..6e57f48843c 100644 --- a/vendor/github.com/gophercloud/gophercloud/provider_client.go +++ b/vendor/github.com/gophercloud/gophercloud/provider_client.go @@ -14,7 +14,7 @@ import ( // DefaultUserAgent is the default User-Agent string set in the request header. const ( - DefaultUserAgent = "gophercloud/v1.11.0" + DefaultUserAgent = "gophercloud/v1.14.0" DefaultMaxBackoffRetries = 60 ) diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/server_metrics.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/server_metrics.go index 9b2f3e6d8dd..def21e5bf15 100644 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/server_metrics.go +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/server_metrics.go @@ -7,6 +7,7 @@ import ( "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors" "github.com/prometheus/client_golang/prometheus" "google.golang.org/grpc" + "google.golang.org/grpc/reflection" ) // ServerMetrics represents a collection of metrics to be registered on a @@ -81,7 +82,7 @@ func (m *ServerMetrics) Collect(ch chan<- prometheus.Metric) { // value, for all gRPC methods registered on a gRPC server. This is useful, to // ensure that all metrics exist when collecting and querying. // NOTE: This might add significant cardinality and might not be needed in future version of Prometheus (created timestamp). -func (m *ServerMetrics) InitializeMetrics(server *grpc.Server) { +func (m *ServerMetrics) InitializeMetrics(server reflection.ServiceInfoProvider) { serviceInfo := server.GetServiceInfo() for serviceName, info := range serviceInfo { for _, mInfo := range info.Methods { diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/callmeta.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/callmeta.go new file mode 100644 index 00000000000..16a18288d99 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/callmeta.go @@ -0,0 +1,66 @@ +// Copyright (c) The go-grpc-middleware Authors. +// Licensed under the Apache License 2.0. 
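// Illustrative sketch only (not part of the patch): this exercises the
// BuildQueryString slice handling documented in the gophercloud params.go hunk
// above. The conventional struct-tag form format:"comma-separated" is used here
// because the implementation reads the tag with reflect.StructTag.Get("format");
// the option names and values are made up.
package main

import (
	"fmt"

	"github.com/gophercloud/gophercloud"
)

type listOpts struct {
	Bar []string `q:"bar"`                          // repeated: ?bar=a&bar=b
	Baz []int    `q:"baz" format:"comma-separated"` // joined:   ?baz=1,2
}

func main() {
	u, err := gophercloud.BuildQueryString(listOpts{
		Bar: []string{"a", "b"},
		Baz: []int{1, 2},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(u.String()) // e.g. ?bar=a&bar=b&baz=1%2C2
}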
+ +package interceptors + +import ( + "fmt" + "strings" + + "google.golang.org/grpc" +) + +func splitFullMethodName(fullMethod string) (string, string) { + fullMethod = strings.TrimPrefix(fullMethod, "/") // remove leading slash + if i := strings.Index(fullMethod, "/"); i >= 0 { + return fullMethod[:i], fullMethod[i+1:] + } + return "unknown", "unknown" +} + +type CallMeta struct { + ReqOrNil any + Typ GRPCType + Service string + Method string + IsClient bool +} + +func NewClientCallMeta(fullMethod string, streamDesc *grpc.StreamDesc, reqOrNil any) CallMeta { + c := CallMeta{IsClient: true, ReqOrNil: reqOrNil, Typ: Unary} + if streamDesc != nil { + c.Typ = clientStreamType(streamDesc) + } + c.Service, c.Method = splitFullMethodName(fullMethod) + return c +} + +func NewServerCallMeta(fullMethod string, streamInfo *grpc.StreamServerInfo, reqOrNil any) CallMeta { + c := CallMeta{IsClient: false, ReqOrNil: reqOrNil, Typ: Unary} + if streamInfo != nil { + c.Typ = serverStreamType(streamInfo) + } + c.Service, c.Method = splitFullMethodName(fullMethod) + return c +} +func (c CallMeta) FullMethod() string { + return fmt.Sprintf("/%s/%s", c.Service, c.Method) +} + +func clientStreamType(desc *grpc.StreamDesc) GRPCType { + if desc.ClientStreams && !desc.ServerStreams { + return ClientStream + } else if !desc.ClientStreams && desc.ServerStreams { + return ServerStream + } + return BidiStream +} + +func serverStreamType(info *grpc.StreamServerInfo) GRPCType { + if info.IsClientStream && !info.IsServerStream { + return ClientStream + } else if !info.IsClientStream && info.IsServerStream { + return ServerStream + } + return BidiStream +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/client.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/client.go index 86c51a07924..571e826e814 100644 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/client.go +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/client.go @@ -15,9 +15,9 @@ import ( // UnaryClientInterceptor is a gRPC client-side interceptor that provides reporting for Unary RPCs. func UnaryClientInterceptor(reportable ClientReportable) grpc.UnaryClientInterceptor { - return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - r := newReport(Unary, method) - reporter, newCtx := reportable.ClientReporter(ctx, CallMeta{ReqProtoOrNil: req, Typ: r.rpcType, Service: r.service, Method: r.method}) + return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + r := newReport(NewClientCallMeta(method, nil, req)) + reporter, newCtx := reportable.ClientReporter(ctx, r.callMeta) reporter.PostMsgSend(req, nil, time.Since(r.startTime)) err := invoker(newCtx, method, req, reply, cc, opts...) @@ -30,8 +30,8 @@ func UnaryClientInterceptor(reportable ClientReportable) grpc.UnaryClientInterce // StreamClientInterceptor is a gRPC client-side interceptor that provides reporting for Stream RPCs. 
func StreamClientInterceptor(reportable ClientReportable) grpc.StreamClientInterceptor { return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { - r := newReport(clientStreamType(desc), method) - reporter, newCtx := reportable.ClientReporter(ctx, CallMeta{ReqProtoOrNil: nil, Typ: r.rpcType, Service: r.service, Method: r.method}) + r := newReport(NewClientCallMeta(method, desc, nil)) + reporter, newCtx := reportable.ClientReporter(ctx, r.callMeta) clientStream, err := streamer(newCtx, desc, cc, method, opts...) if err != nil { @@ -42,15 +42,6 @@ func StreamClientInterceptor(reportable ClientReportable) grpc.StreamClientInter } } -func clientStreamType(desc *grpc.StreamDesc) GRPCType { - if desc.ClientStreams && !desc.ServerStreams { - return ClientStream - } else if !desc.ClientStreams && desc.ServerStreams { - return ServerStream - } - return BidiStream -} - // monitoredClientStream wraps grpc.ClientStream allowing each Sent/Recv of message to report. type monitoredClientStream struct { grpc.ClientStream @@ -59,14 +50,14 @@ type monitoredClientStream struct { reporter Reporter } -func (s *monitoredClientStream) SendMsg(m interface{}) error { +func (s *monitoredClientStream) SendMsg(m any) error { start := time.Now() err := s.ClientStream.SendMsg(m) s.reporter.PostMsgSend(m, err, time.Since(start)) return err } -func (s *monitoredClientStream) RecvMsg(m interface{}) error { +func (s *monitoredClientStream) RecvMsg(m any) error { start := time.Now() err := s.ClientStream.RecvMsg(m) s.reporter.PostMsgReceive(m, err, time.Since(start)) diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/reporter.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/reporter.go index cc3b4f136d4..c93a3af79d2 100644 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/reporter.go +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/reporter.go @@ -5,8 +5,6 @@ package interceptors import ( "context" - "fmt" - "strings" "time" "google.golang.org/grpc/codes" @@ -14,23 +12,6 @@ import ( type GRPCType string -// Timer is a helper interface to time functions. -// Useful for interceptors to record the total -// time elapsed since completion of a call. -type Timer interface { - ObserveDuration() time.Duration -} - -// zeroTimer. -type zeroTimer struct { -} - -func (zeroTimer) ObserveDuration() time.Duration { - return 0 -} - -var EmptyTimer = &zeroTimer{} - const ( Unary GRPCType = "unary" ClientStream GRPCType = "client_stream" @@ -47,25 +28,6 @@ var ( } ) -func SplitMethodName(fullMethod string) (string, string) { - fullMethod = strings.TrimPrefix(fullMethod, "/") // remove leading slash - if i := strings.Index(fullMethod, "/"); i >= 0 { - return fullMethod[:i], fullMethod[i+1:] - } - return "unknown", "unknown" -} - -type CallMeta struct { - ReqProtoOrNil interface{} - Typ GRPCType - Service string - Method string -} - -func (c CallMeta) FullMethod() string { - return fmt.Sprintf("/%s/%s", c.Service, c.Method) -} - type ClientReportable interface { ClientReporter(context.Context, CallMeta) (Reporter, context.Context) } @@ -75,42 +37,39 @@ type ServerReportable interface { } // CommonReportableFunc helper allows an easy way to implement reporter with common client and server logic. 
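// Illustrative sketch only (not part of the patch): with the CallMeta-based
// signature introduced in this hunk, one CommonReportableFunc serves both the
// client- and server-side interceptors; c.IsClient replaces the old isClient
// argument. The logging reporter below is made up for the example.
package main

import (
	"context"
	"log"

	"github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors"
	"google.golang.org/grpc"
)

func main() {
	reportable := interceptors.CommonReportableFunc(func(ctx context.Context, c interceptors.CallMeta) (interceptors.Reporter, context.Context) {
		log.Printf("call %s (type=%s, client=%t)", c.FullMethod(), c.Typ, c.IsClient)
		return interceptors.NoopReporter{}, ctx
	})

	_ = grpc.NewServer(
		grpc.ChainUnaryInterceptor(interceptors.UnaryServerInterceptor(reportable)),
		grpc.ChainStreamInterceptor(interceptors.StreamServerInterceptor(reportable)),
	)
}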
-type CommonReportableFunc func(ctx context.Context, c CallMeta, isClient bool) (Reporter, context.Context) +type CommonReportableFunc func(ctx context.Context, c CallMeta) (Reporter, context.Context) func (f CommonReportableFunc) ClientReporter(ctx context.Context, c CallMeta) (Reporter, context.Context) { - return f(ctx, c, true) + return f(ctx, c) } func (f CommonReportableFunc) ServerReporter(ctx context.Context, c CallMeta) (Reporter, context.Context) { - return f(ctx, c, false) + return f(ctx, c) } type Reporter interface { PostCall(err error, rpcDuration time.Duration) - PostMsgSend(reqProto interface{}, err error, sendDuration time.Duration) - PostMsgReceive(replyProto interface{}, err error, recvDuration time.Duration) + PostMsgSend(reqProto any, err error, sendDuration time.Duration) + PostMsgReceive(replyProto any, err error, recvDuration time.Duration) } var _ Reporter = NoopReporter{} type NoopReporter struct{} -func (NoopReporter) PostCall(error, time.Duration) {} -func (NoopReporter) PostMsgSend(interface{}, error, time.Duration) {} -func (NoopReporter) PostMsgReceive(interface{}, error, time.Duration) {} +func (NoopReporter) PostCall(error, time.Duration) {} +func (NoopReporter) PostMsgSend(any, error, time.Duration) {} +func (NoopReporter) PostMsgReceive(any, error, time.Duration) {} type report struct { - rpcType GRPCType - service string - method string + callMeta CallMeta startTime time.Time } -func newReport(typ GRPCType, fullMethod string) report { +func newReport(callMeta CallMeta) report { r := report{ startTime: time.Now(), - rpcType: typ, + callMeta: callMeta, } - r.service, r.method = SplitMethodName(fullMethod) return r } diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/server.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/server.go index 1fcb8e4e97b..0484109069a 100644 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/server.go +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/server.go @@ -14,9 +14,9 @@ import ( // UnaryServerInterceptor is a gRPC server-side interceptor that provides reporting for Unary RPCs. func UnaryServerInterceptor(reportable ServerReportable) grpc.UnaryServerInterceptor { - return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - r := newReport(Unary, info.FullMethod) - reporter, newCtx := reportable.ServerReporter(ctx, CallMeta{ReqProtoOrNil: req, Typ: r.rpcType, Service: r.service, Method: r.method}) + return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { + r := newReport(NewServerCallMeta(info.FullMethod, nil, req)) + reporter, newCtx := reportable.ServerReporter(ctx, r.callMeta) reporter.PostMsgReceive(req, nil, time.Since(r.startTime)) resp, err := handler(newCtx, req) @@ -29,24 +29,15 @@ func UnaryServerInterceptor(reportable ServerReportable) grpc.UnaryServerInterce // StreamServerInterceptor is a gRPC server-side interceptor that provides reporting for Streaming RPCs. 
func StreamServerInterceptor(reportable ServerReportable) grpc.StreamServerInterceptor { - return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - r := newReport(ServerStream, info.FullMethod) - reporter, newCtx := reportable.ServerReporter(ss.Context(), CallMeta{ReqProtoOrNil: nil, Typ: StreamRPCType(info), Service: r.service, Method: r.method}) + return func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + r := newReport(NewServerCallMeta(info.FullMethod, info, nil)) + reporter, newCtx := reportable.ServerReporter(ss.Context(), r.callMeta) err := handler(srv, &monitoredServerStream{ServerStream: ss, newCtx: newCtx, reporter: reporter}) reporter.PostCall(err, time.Since(r.startTime)) return err } } -func StreamRPCType(info *grpc.StreamServerInfo) GRPCType { - if info.IsClientStream && !info.IsServerStream { - return ClientStream - } else if !info.IsClientStream && info.IsServerStream { - return ServerStream - } - return BidiStream -} - // monitoredStream wraps grpc.ServerStream allowing each Sent/Recv of message to report. type monitoredServerStream struct { grpc.ServerStream @@ -59,14 +50,14 @@ func (s *monitoredServerStream) Context() context.Context { return s.newCtx } -func (s *monitoredServerStream) SendMsg(m interface{}) error { +func (s *monitoredServerStream) SendMsg(m any) error { start := time.Now() err := s.ServerStream.SendMsg(m) s.reporter.PostMsgSend(m, err, time.Since(start)) return err } -func (s *monitoredServerStream) RecvMsg(m interface{}) error { +func (s *monitoredServerStream) RecvMsg(m any) error { start := time.Now() err := s.ServerStream.RecvMsg(m) s.reporter.PostMsgReceive(m, err, time.Since(start)) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go index 31553e7848a..5dd4e447862 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go @@ -148,6 +148,12 @@ func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcM var pairs []string for key, vals := range req.Header { key = textproto.CanonicalMIMEHeaderKey(key) + switch key { + case xForwardedFor, xForwardedHost: + // Handled separately below + continue + } + for _, val := range vals { // For backwards-compatibility, pass through 'authorization' header with no prefix. 
if key == "Authorization" { @@ -181,18 +187,17 @@ func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcM pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host) } + xff := req.Header.Values(xForwardedFor) if addr := req.RemoteAddr; addr != "" { if remoteIP, _, err := net.SplitHostPort(addr); err == nil { - if fwd := req.Header.Get(xForwardedFor); fwd == "" { - pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP) - } else { - pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP)) - } + xff = append(xff, remoteIP) } } + if len(xff) > 0 { + pairs = append(pairs, strings.ToLower(xForwardedFor), strings.Join(xff, ", ")) + } if timeout != 0 { - //nolint:govet // The context outlives this function ctx, _ = context.WithTimeout(ctx, timeout) } if len(pairs) == 0 { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go index 230cac7b86c..5682998699a 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go @@ -71,7 +71,7 @@ func HTTPStatusFromCode(code codes.Code) int { case codes.DataLoss: return http.StatusInternalServerError default: - grpclog.Infof("Unknown gRPC error code: %v", code) + grpclog.Warningf("Unknown gRPC error code: %v", code) return http.StatusInternalServerError } } @@ -114,17 +114,17 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh buf, merr := marshaler.Marshal(pb) if merr != nil { - grpclog.Infof("Failed to marshal error message %q: %v", s, merr) + grpclog.Errorf("Failed to marshal error message %q: %v", s, merr) w.WriteHeader(http.StatusInternalServerError) if _, err := io.WriteString(w, fallback); err != nil { - grpclog.Infof("Failed to write response: %v", err) + grpclog.Errorf("Failed to write response: %v", err) } return } md, ok := ServerMetadataFromContext(ctx) if !ok { - grpclog.Infof("Failed to extract ServerMetadata from context") + grpclog.Error("Failed to extract ServerMetadata from context") } handleForwardResponseServerMetadata(w, mux, md) @@ -148,7 +148,7 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh w.WriteHeader(st) if _, err := w.Write(buf); err != nil { - grpclog.Infof("Failed to write response: %v", err) + grpclog.Errorf("Failed to write response: %v", err) } if doForwardTrailers { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go index 5e14cf8b0e2..de1eef1f4f8 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go @@ -6,6 +6,7 @@ import ( "io" "net/http" "net/textproto" + "strconv" "strings" "google.golang.org/genproto/googleapis/api/httpbody" @@ -17,16 +18,10 @@ import ( // ForwardResponseStream forwards the stream from gRPC server to REST client. 
func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { - f, ok := w.(http.Flusher) - if !ok { - grpclog.Infof("Flush not supported in %T", w) - http.Error(w, "unexpected type of web server", http.StatusInternalServerError) - return - } - + rc := http.NewResponseController(w) md, ok := ServerMetadataFromContext(ctx) if !ok { - grpclog.Infof("Failed to extract ServerMetadata from context") + grpclog.Error("Failed to extract ServerMetadata from context") http.Error(w, "unexpected error", http.StatusInternalServerError) return } @@ -81,20 +76,29 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal } if err != nil { - grpclog.Infof("Failed to marshal response chunk: %v", err) + grpclog.Errorf("Failed to marshal response chunk: %v", err) handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) return } if _, err := w.Write(buf); err != nil { - grpclog.Infof("Failed to send response chunk: %v", err) + grpclog.Errorf("Failed to send response chunk: %v", err) return } wroteHeader = true if _, err := w.Write(delimiter); err != nil { - grpclog.Infof("Failed to send delimiter chunk: %v", err) + grpclog.Errorf("Failed to send delimiter chunk: %v", err) + return + } + err = rc.Flush() + if err != nil { + if errors.Is(err, http.ErrNotSupported) { + grpclog.Errorf("Flush not supported in %T", w) + http.Error(w, "unexpected type of web server", http.StatusInternalServerError) + return + } + grpclog.Errorf("Failed to flush response to client: %v", err) return } - f.Flush() } } @@ -136,7 +140,7 @@ type responseBody interface { func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { md, ok := ServerMetadataFromContext(ctx) if !ok { - grpclog.Infof("Failed to extract ServerMetadata from context") + grpclog.Error("Failed to extract ServerMetadata from context") } handleForwardResponseServerMetadata(w, mux, md) @@ -168,13 +172,17 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha buf, err = marshaler.Marshal(resp) } if err != nil { - grpclog.Infof("Marshal error: %v", err) + grpclog.Errorf("Marshal error: %v", err) HTTPError(ctx, mux, marshaler, w, req, err) return } + if !doForwardTrailers { + w.Header().Set("Content-Length", strconv.Itoa(len(buf))) + } + if _, err = w.Write(buf); err != nil { - grpclog.Infof("Failed to write response: %v", err) + grpclog.Errorf("Failed to write response: %v", err) } if doForwardTrailers { @@ -193,7 +201,7 @@ func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, re } for _, opt := range opts { if err := opt(ctx, w, resp); err != nil { - grpclog.Infof("Error handling ForwardResponseOptions: %v", err) + grpclog.Errorf("Error handling ForwardResponseOptions: %v", err) return err } } @@ -209,15 +217,15 @@ func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, mar } buf, err := marshaler.Marshal(msg) if err != nil { - grpclog.Infof("Failed to marshal an error: %v", err) + grpclog.Errorf("Failed to marshal an error: %v", err) return } if _, err := w.Write(buf); err != nil { - grpclog.Infof("Failed to notify error to client: %v", err) + grpclog.Errorf("Failed to notify error to client: %v", err) return 
} if _, err := w.Write(delimiter); err != nil { - grpclog.Infof("Failed to send delimiter chunk: %v", err) + grpclog.Errorf("Failed to send delimiter chunk: %v", err) return } } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go index d6aa8257836..fe52081ab94 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go @@ -24,6 +24,11 @@ func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) { return json.Marshal(v) } +// MarshalIndent is like Marshal but applies Indent to format the output +func (j *JSONBuiltin) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + return json.MarshalIndent(v, prefix, indent) +} + // Unmarshal unmarshals JSON data into "v". func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go index 51b8247da2a..8376d1e0efd 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go @@ -30,10 +30,6 @@ func (*JSONPb) ContentType(_ interface{}) string { // Marshal marshals "v" into JSON. func (j *JSONPb) Marshal(v interface{}) ([]byte, error) { - if _, ok := v.(proto.Message); !ok { - return j.marshalNonProtoField(v) - } - var buf bytes.Buffer if err := j.marshalTo(&buf, v); err != nil { return nil, err @@ -48,9 +44,17 @@ func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error { if err != nil { return err } + if j.Indent != "" { + b := &bytes.Buffer{} + if err := json.Indent(b, buf, "", j.Indent); err != nil { + return err + } + buf = b.Bytes() + } _, err = w.Write(buf) return err } + b, err := j.MarshalOptions.Marshal(p) if err != nil { return err @@ -150,9 +154,6 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { } m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf) } - if j.Indent != "" { - return json.MarshalIndent(m, "", j.Indent) - } return json.Marshal(m) } if enum, ok := rv.Interface().(protoEnum); ok && !j.UseEnumNumbers { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go index a714de02406..0b051e6e894 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go @@ -46,7 +46,7 @@ func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, out for _, contentTypeVal := range r.Header[contentTypeHeader] { contentType, _, err := mime.ParseMediaType(contentTypeVal) if err != nil { - grpclog.Infof("Failed to parse Content-Type %s: %v", contentTypeVal, err) + grpclog.Errorf("Failed to parse Content-Type %s: %v", contentTypeVal, err) continue } if m, ok := mux.marshalers.mimeMap[contentType]; ok { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go index 8f90d15a562..e54507145b6 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go @@ -52,13 
+52,13 @@ type Pattern struct { // It returns an error if the given definition is invalid. func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, error) { if version != 1 { - grpclog.Infof("unsupported version: %d", version) + grpclog.Errorf("unsupported version: %d", version) return Pattern{}, ErrInvalidPattern } l := len(ops) if l%2 != 0 { - grpclog.Infof("odd number of ops codes: %d", l) + grpclog.Errorf("odd number of ops codes: %d", l) return Pattern{}, ErrInvalidPattern } @@ -81,14 +81,14 @@ func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, er stack++ case utilities.OpPushM: if pushMSeen { - grpclog.Infof("pushM appears twice") + grpclog.Error("pushM appears twice") return Pattern{}, ErrInvalidPattern } pushMSeen = true stack++ case utilities.OpLitPush: if op.operand < 0 || len(pool) <= op.operand { - grpclog.Infof("negative literal index: %d", op.operand) + grpclog.Errorf("negative literal index: %d", op.operand) return Pattern{}, ErrInvalidPattern } if pushMSeen { @@ -97,18 +97,18 @@ func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, er stack++ case utilities.OpConcatN: if op.operand <= 0 { - grpclog.Infof("negative concat size: %d", op.operand) + grpclog.Errorf("negative concat size: %d", op.operand) return Pattern{}, ErrInvalidPattern } stack -= op.operand if stack < 0 { - grpclog.Info("stack underflow") + grpclog.Error("stack underflow") return Pattern{}, ErrInvalidPattern } stack++ case utilities.OpCapture: if op.operand < 0 || len(pool) <= op.operand { - grpclog.Infof("variable name index out of bound: %d", op.operand) + grpclog.Errorf("variable name index out of bound: %d", op.operand) return Pattern{}, ErrInvalidPattern } v := pool[op.operand] @@ -116,11 +116,11 @@ func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, er vars = append(vars, v) stack-- if stack < 0 { - grpclog.Infof("stack underflow") + grpclog.Error("stack underflow") return Pattern{}, ErrInvalidPattern } default: - grpclog.Infof("invalid opcode: %d", op.code) + grpclog.Errorf("invalid opcode: %d", op.code) return Pattern{}, ErrInvalidPattern } diff --git a/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go b/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go index 7070445cc02..4bc1390b93a 100644 --- a/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go +++ b/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go @@ -6,6 +6,7 @@ package api import ( "errors" "math/rand" + "strings" "sync" "time" @@ -289,12 +290,18 @@ func (r *LifetimeWatcher) doRenewWithOptions(tokenMode bool, nonRenewable bool, switch { case nonRenewable || r.renewBehavior == RenewBehaviorRenewDisabled: // Can't or won't renew, just keep the same expiration so we exit - // when it's reauthentication time + // when it's re-authentication time remainingLeaseDuration = fallbackLeaseDuration default: // Renew the token renewal, err = renew(credString, r.increment) + if err != nil && strings.Contains(err.Error(), "permission denied") { + // We can't renew since the token doesn't have permission to. Fall back + // to the code path for non-renewable tokens. 
+ nonRenewable = true + continue + } if err != nil || renewal == nil || (tokenMode && renewal.Auth == nil) { if r.renewBehavior == RenewBehaviorErrorOnErrors { if err != nil { diff --git a/vendor/github.com/jackc/pgx/v5/CHANGELOG.md b/vendor/github.com/jackc/pgx/v5/CHANGELOG.md index 5f780fdc3ce..61b4695fd5b 100644 --- a/vendor/github.com/jackc/pgx/v5/CHANGELOG.md +++ b/vendor/github.com/jackc/pgx/v5/CHANGELOG.md @@ -1,3 +1,22 @@ +# 5.6.0 (May 25, 2024) + +* Add StrictNamedArgs (Tomas Zahradnicek) +* Add support for macaddr8 type (Carlos Pérez-Aradros Herce) +* Add SeverityUnlocalized field to PgError / Notice +* Performance optimization of RowToStructByPos/Name (Zach Olstein) +* Allow customizing context canceled behavior for pgconn +* Add ScanLocation to pgtype.Timestamp[tz]Codec +* Add custom data to pgconn.PgConn +* Fix ResultReader.Read() to handle nil values +* Do not encode interval microseconds when they are 0 (Carlos Pérez-Aradros Herce) +* pgconn.SafeToRetry checks for wrapped errors (tjasko) +* Failed connection attempts include all errors +* Optimize LargeObject.Read (Mitar) +* Add tracing for connection acquire and release from pool (ngavinsir) +* Fix encode driver.Valuer not called when nil +* Add support for custom JSON marshal and unmarshal (Mitar) +* Use Go default keepalive for TCP connections (Hans-Joachim Kliemeck) + # 5.5.5 (March 9, 2024) Use spaces instead of parentheses for SQL sanitization. diff --git a/vendor/github.com/jackc/pgx/v5/CONTRIBUTING.md b/vendor/github.com/jackc/pgx/v5/CONTRIBUTING.md index 6ed3205ce2c..c975a937242 100644 --- a/vendor/github.com/jackc/pgx/v5/CONTRIBUTING.md +++ b/vendor/github.com/jackc/pgx/v5/CONTRIBUTING.md @@ -29,6 +29,7 @@ Create and setup a test database: export PGDATABASE=pgx_test createdb psql -c 'create extension hstore;' +psql -c 'create extension ltree;' psql -c 'create domain uint64 as numeric(20,0);' ``` diff --git a/vendor/github.com/jackc/pgx/v5/README.md b/vendor/github.com/jackc/pgx/v5/README.md index 49f2c3d781a..0cf2c2916fe 100644 --- a/vendor/github.com/jackc/pgx/v5/README.md +++ b/vendor/github.com/jackc/pgx/v5/README.md @@ -92,7 +92,7 @@ See the presentation at Golang Estonia, [PGX Top to Bottom](https://www.youtube. ## Supported Go and PostgreSQL Versions -pgx supports the same versions of Go and PostgreSQL that are supported by their respective teams. For [Go](https://golang.org/doc/devel/release.html#policy) that is the two most recent major releases and for [PostgreSQL](https://www.postgresql.org/support/versioning/) the major releases in the last 5 years. This means pgx supports Go 1.20 and higher and PostgreSQL 12 and higher. pgx also is tested against the latest version of [CockroachDB](https://www.cockroachlabs.com/product/). +pgx supports the same versions of Go and PostgreSQL that are supported by their respective teams. For [Go](https://golang.org/doc/devel/release.html#policy) that is the two most recent major releases and for [PostgreSQL](https://www.postgresql.org/support/versioning/) the major releases in the last 5 years. This means pgx supports Go 1.21 and higher and PostgreSQL 12 and higher. pgx also is tested against the latest version of [CockroachDB](https://www.cockroachlabs.com/product/). 
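The pgx 5.6.0 changelog above introduces `StrictNamedArgs` as a stricter drop-in variant of `NamedArgs`. A minimal sketch of the difference, assuming a hypothetical `widgets` table and a `DATABASE_URL` environment variable:

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/jackc/pgx/v5"
)

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, os.Getenv("DATABASE_URL"))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(ctx)

	// NamedArgs silently substitutes NULL for any @name missing from the map and
	// ignores unused keys; StrictNamedArgs returns an error in both cases instead.
	args := pgx.StrictNamedArgs{"id": 42}

	// The query rewriter turns @id into a positional $1 parameter before execution.
	var name string
	if err := conn.QueryRow(ctx, "select name from widgets where id = @id", args).Scan(&name); err != nil {
		log.Fatal(err)
	}
	log.Println(name)

	// Passing pgx.StrictNamedArgs{"id": 42, "extra": 1} here would instead fail with
	// "argument extra of StrictNamedArgs not found in sql query".
}
```

As the `named_args.go` hunk further down shows, both types share the same `rewriteQuery` path; only the strict missing/extra-argument checks differ.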
## Version Policy diff --git a/vendor/github.com/jackc/pgx/v5/batch.go b/vendor/github.com/jackc/pgx/v5/batch.go index b9b46d1d75c..3540f57f53c 100644 --- a/vendor/github.com/jackc/pgx/v5/batch.go +++ b/vendor/github.com/jackc/pgx/v5/batch.go @@ -12,7 +12,7 @@ import ( type QueuedQuery struct { SQL string Arguments []any - fn batchItemFunc + Fn batchItemFunc sd *pgconn.StatementDescription } @@ -20,7 +20,7 @@ type batchItemFunc func(br BatchResults) error // Query sets fn to be called when the response to qq is received. func (qq *QueuedQuery) Query(fn func(rows Rows) error) { - qq.fn = func(br BatchResults) error { + qq.Fn = func(br BatchResults) error { rows, _ := br.Query() defer rows.Close() @@ -36,7 +36,7 @@ func (qq *QueuedQuery) Query(fn func(rows Rows) error) { // Query sets fn to be called when the response to qq is received. func (qq *QueuedQuery) QueryRow(fn func(row Row) error) { - qq.fn = func(br BatchResults) error { + qq.Fn = func(br BatchResults) error { row := br.QueryRow() return fn(row) } @@ -44,7 +44,7 @@ func (qq *QueuedQuery) QueryRow(fn func(row Row) error) { // Exec sets fn to be called when the response to qq is received. func (qq *QueuedQuery) Exec(fn func(ct pgconn.CommandTag) error) { - qq.fn = func(br BatchResults) error { + qq.Fn = func(br BatchResults) error { ct, err := br.Exec() if err != nil { return err @@ -228,8 +228,8 @@ func (br *batchResults) Close() error { // Read and run fn for all remaining items for br.err == nil && !br.closed && br.b != nil && br.qqIdx < len(br.b.QueuedQueries) { - if br.b.QueuedQueries[br.qqIdx].fn != nil { - err := br.b.QueuedQueries[br.qqIdx].fn(br) + if br.b.QueuedQueries[br.qqIdx].Fn != nil { + err := br.b.QueuedQueries[br.qqIdx].Fn(br) if err != nil { br.err = err } @@ -397,8 +397,8 @@ func (br *pipelineBatchResults) Close() error { // Read and run fn for all remaining items for br.err == nil && !br.closed && br.b != nil && br.qqIdx < len(br.b.QueuedQueries) { - if br.b.QueuedQueries[br.qqIdx].fn != nil { - err := br.b.QueuedQueries[br.qqIdx].fn(br) + if br.b.QueuedQueries[br.qqIdx].Fn != nil { + err := br.b.QueuedQueries[br.qqIdx].Fn(br) if err != nil { br.err = err } diff --git a/vendor/github.com/jackc/pgx/v5/conn.go b/vendor/github.com/jackc/pgx/v5/conn.go index fc72c732e97..31172145993 100644 --- a/vendor/github.com/jackc/pgx/v5/conn.go +++ b/vendor/github.com/jackc/pgx/v5/conn.go @@ -10,7 +10,6 @@ import ( "strings" "time" - "github.com/jackc/pgx/v5/internal/anynil" "github.com/jackc/pgx/v5/internal/sanitize" "github.com/jackc/pgx/v5/internal/stmtcache" "github.com/jackc/pgx/v5/pgconn" @@ -624,7 +623,7 @@ const ( // to execute. It does not use named prepared statements. But it does use the unnamed prepared statement to get the // statement description on the first round trip and then uses it to execute the query on the second round trip. This // may cause problems with connection poolers that switch the underlying connection between round trips. It is safe - // even when the the database schema is modified concurrently. + // even when the database schema is modified concurrently. QueryExecModeDescribeExec // Assume the PostgreSQL query parameter types based on the Go type of the arguments. 
This uses the extended protocol @@ -755,7 +754,6 @@ optionLoop: } c.eqb.reset() - anynil.NormalizeSlice(args) rows := c.getRows(ctx, sql, args) var err error diff --git a/vendor/github.com/jackc/pgx/v5/doc.go b/vendor/github.com/jackc/pgx/v5/doc.go index db99fc4cbb8..bc0391dde30 100644 --- a/vendor/github.com/jackc/pgx/v5/doc.go +++ b/vendor/github.com/jackc/pgx/v5/doc.go @@ -11,9 +11,10 @@ The primary way of establishing a connection is with [pgx.Connect]: conn, err := pgx.Connect(context.Background(), os.Getenv("DATABASE_URL")) -The database connection string can be in URL or DSN format. Both PostgreSQL settings and pgx settings can be specified -here. In addition, a config struct can be created by [ParseConfig] and modified before establishing the connection with -[ConnectConfig] to configure settings such as tracing that cannot be configured with a connection string. +The database connection string can be in URL or key/value format. Both PostgreSQL settings and pgx settings can be +specified here. In addition, a config struct can be created by [ParseConfig] and modified before establishing the +connection with [ConnectConfig] to configure settings such as tracing that cannot be configured with a connection +string. Connection Pool @@ -23,8 +24,8 @@ github.com/jackc/pgx/v5/pgxpool for a concurrency safe connection pool. Query Interface pgx implements Query in the familiar database/sql style. However, pgx provides generic functions such as CollectRows and -ForEachRow that are a simpler and safer way of processing rows than manually calling rows.Next(), rows.Scan, and -rows.Err(). +ForEachRow that are a simpler and safer way of processing rows than manually calling defer rows.Close(), rows.Next(), +rows.Scan, and rows.Err(). CollectRows can be used collect all returned rows into a slice. diff --git a/vendor/github.com/jackc/pgx/v5/extended_query_builder.go b/vendor/github.com/jackc/pgx/v5/extended_query_builder.go index 9c9de5b2cf8..526b0e953be 100644 --- a/vendor/github.com/jackc/pgx/v5/extended_query_builder.go +++ b/vendor/github.com/jackc/pgx/v5/extended_query_builder.go @@ -1,10 +1,8 @@ package pgx import ( - "database/sql/driver" "fmt" - "github.com/jackc/pgx/v5/internal/anynil" "github.com/jackc/pgx/v5/pgconn" "github.com/jackc/pgx/v5/pgtype" ) @@ -23,10 +21,15 @@ type ExtendedQueryBuilder struct { func (eqb *ExtendedQueryBuilder) Build(m *pgtype.Map, sd *pgconn.StatementDescription, args []any) error { eqb.reset() - anynil.NormalizeSlice(args) - if sd == nil { - return eqb.appendParamsForQueryExecModeExec(m, args) + for i := range args { + err := eqb.appendParam(m, 0, pgtype.TextFormatCode, args[i]) + if err != nil { + err = fmt.Errorf("failed to encode args[%d]: %w", i, err) + return err + } + } + return nil } if len(sd.ParamOIDs) != len(args) { @@ -113,10 +116,6 @@ func (eqb *ExtendedQueryBuilder) reset() { } func (eqb *ExtendedQueryBuilder) encodeExtendedParamValue(m *pgtype.Map, oid uint32, formatCode int16, arg any) ([]byte, error) { - if anynil.Is(arg) { - return nil, nil - } - if eqb.paramValueBytes == nil { eqb.paramValueBytes = make([]byte, 0, 128) } @@ -145,74 +144,3 @@ func (eqb *ExtendedQueryBuilder) chooseParameterFormatCode(m *pgtype.Map, oid ui return m.FormatCodeForOID(oid) } - -// appendParamsForQueryExecModeExec appends the args to eqb. -// -// Parameters must be encoded in the text format because of differences in type conversion between timestamps and -// dates. In QueryExecModeExec we don't know what the actual PostgreSQL type is. 
To determine the type we use the -// Go type to OID type mapping registered by RegisterDefaultPgType. However, the Go time.Time represents both -// PostgreSQL timestamp[tz] and date. To use the binary format we would need to also specify what the PostgreSQL -// type OID is. But that would mean telling PostgreSQL that we have sent a timestamp[tz] when what is needed is a date. -// This means that the value is converted from text to timestamp[tz] to date. This means it does a time zone conversion -// before converting it to date. This means that dates can be shifted by one day. In text format without that double -// type conversion it takes the date directly and ignores time zone (i.e. it works). -// -// Given that the whole point of QueryExecModeExec is to operate without having to know the PostgreSQL types there is -// no way to safely use binary or to specify the parameter OIDs. -func (eqb *ExtendedQueryBuilder) appendParamsForQueryExecModeExec(m *pgtype.Map, args []any) error { - for _, arg := range args { - if arg == nil { - err := eqb.appendParam(m, 0, TextFormatCode, arg) - if err != nil { - return err - } - } else { - dt, ok := m.TypeForValue(arg) - if !ok { - var tv pgtype.TextValuer - if tv, ok = arg.(pgtype.TextValuer); ok { - t, err := tv.TextValue() - if err != nil { - return err - } - - dt, ok = m.TypeForOID(pgtype.TextOID) - if ok { - arg = t - } - } - } - if !ok { - var dv driver.Valuer - if dv, ok = arg.(driver.Valuer); ok { - v, err := dv.Value() - if err != nil { - return err - } - dt, ok = m.TypeForValue(v) - if ok { - arg = v - } - } - } - if !ok { - var str fmt.Stringer - if str, ok = arg.(fmt.Stringer); ok { - dt, ok = m.TypeForOID(pgtype.TextOID) - if ok { - arg = str.String() - } - } - } - if !ok { - return &unknownArgumentTypeQueryExecModeExecError{arg: arg} - } - err := eqb.appendParam(m, dt.OID, TextFormatCode, arg) - if err != nil { - return err - } - } - } - - return nil -} diff --git a/vendor/github.com/jackc/pgx/v5/internal/anynil/anynil.go b/vendor/github.com/jackc/pgx/v5/internal/anynil/anynil.go deleted file mode 100644 index 9a48c1a844c..00000000000 --- a/vendor/github.com/jackc/pgx/v5/internal/anynil/anynil.go +++ /dev/null @@ -1,36 +0,0 @@ -package anynil - -import "reflect" - -// Is returns true if value is any type of nil. e.g. nil or []byte(nil). -func Is(value any) bool { - if value == nil { - return true - } - - refVal := reflect.ValueOf(value) - switch refVal.Kind() { - case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.UnsafePointer, reflect.Interface, reflect.Slice: - return refVal.IsNil() - default: - return false - } -} - -// Normalize converts typed nils (e.g. []byte(nil)) into untyped nil. Other values are returned unmodified. -func Normalize(v any) any { - if Is(v) { - return nil - } - return v -} - -// NormalizeSlice converts all typed nils (e.g. []byte(nil)) in s into untyped nils. Other values are unmodified. s is -// mutated in place. -func NormalizeSlice(s []any) { - for i := range s { - if Is(s[i]) { - s[i] = nil - } - } -} diff --git a/vendor/github.com/jackc/pgx/v5/large_objects.go b/vendor/github.com/jackc/pgx/v5/large_objects.go index a3028b63854..9d21afdce91 100644 --- a/vendor/github.com/jackc/pgx/v5/large_objects.go +++ b/vendor/github.com/jackc/pgx/v5/large_objects.go @@ -4,6 +4,8 @@ import ( "context" "errors" "io" + + "github.com/jackc/pgx/v5/pgtype" ) // The PostgreSQL wire protocol has a limit of 1 GB - 1 per message. 
See definition of @@ -115,9 +117,10 @@ func (o *LargeObject) Read(p []byte) (int, error) { expected = maxLargeObjectMessageLength } - var res []byte + res := pgtype.PreallocBytes(p[nTotal:]) err := o.tx.QueryRow(o.ctx, "select loread($1, $2)", o.fd, expected).Scan(&res) - copy(p[nTotal:], res) + // We compute expected so that it always fits into p, so it should never happen + // that PreallocBytes's ScanBytes had to allocate a new slice. nTotal += len(res) if err != nil { return nTotal, err diff --git a/vendor/github.com/jackc/pgx/v5/named_args.go b/vendor/github.com/jackc/pgx/v5/named_args.go index 8367fc63a2b..c88991ee4a9 100644 --- a/vendor/github.com/jackc/pgx/v5/named_args.go +++ b/vendor/github.com/jackc/pgx/v5/named_args.go @@ -2,6 +2,7 @@ package pgx import ( "context" + "fmt" "strconv" "strings" "unicode/utf8" @@ -21,6 +22,34 @@ type NamedArgs map[string]any // RewriteQuery implements the QueryRewriter interface. func (na NamedArgs) RewriteQuery(ctx context.Context, conn *Conn, sql string, args []any) (newSQL string, newArgs []any, err error) { + return rewriteQuery(na, sql, false) +} + +// StrictNamedArgs can be used in the same way as NamedArgs, but provided arguments are also checked to include all +// named arguments that the sql query uses, and no extra arguments. +type StrictNamedArgs map[string]any + +// RewriteQuery implements the QueryRewriter interface. +func (sna StrictNamedArgs) RewriteQuery(ctx context.Context, conn *Conn, sql string, args []any) (newSQL string, newArgs []any, err error) { + return rewriteQuery(sna, sql, true) +} + +type namedArg string + +type sqlLexer struct { + src string + start int + pos int + nested int // multiline comment nesting level. + stateFn stateFn + parts []any + + nameToOrdinal map[namedArg]int +} + +type stateFn func(*sqlLexer) stateFn + +func rewriteQuery(na map[string]any, sql string, isStrict bool) (newSQL string, newArgs []any, err error) { l := &sqlLexer{ src: sql, stateFn: rawState, @@ -44,27 +73,24 @@ func (na NamedArgs) RewriteQuery(ctx context.Context, conn *Conn, sql string, ar newArgs = make([]any, len(l.nameToOrdinal)) for name, ordinal := range l.nameToOrdinal { - newArgs[ordinal-1] = na[string(name)] + var found bool + newArgs[ordinal-1], found = na[string(name)] + if isStrict && !found { + return "", nil, fmt.Errorf("argument %s found in sql query but not present in StrictNamedArgs", name) + } } - return sb.String(), newArgs, nil -} - -type namedArg string - -type sqlLexer struct { - src string - start int - pos int - nested int // multiline comment nesting level. - stateFn stateFn - parts []any + if isStrict { + for name := range na { + if _, found := l.nameToOrdinal[namedArg(name)]; !found { + return "", nil, fmt.Errorf("argument %s of StrictNamedArgs not found in sql query", name) + } + } + } - nameToOrdinal map[namedArg]int + return sb.String(), newArgs, nil } -type stateFn func(*sqlLexer) stateFn - func rawState(l *sqlLexer) stateFn { for { r, width := utf8.DecodeRuneInString(l.src[l.pos:]) diff --git a/vendor/github.com/jackc/pgx/v5/pgconn/config.go b/vendor/github.com/jackc/pgx/v5/pgconn/config.go index 33a72257914..598917f5541 100644 --- a/vendor/github.com/jackc/pgx/v5/pgconn/config.go +++ b/vendor/github.com/jackc/pgx/v5/pgconn/config.go @@ -19,6 +19,7 @@ import ( "github.com/jackc/pgpassfile" "github.com/jackc/pgservicefile" + "github.com/jackc/pgx/v5/pgconn/ctxwatch" "github.com/jackc/pgx/v5/pgproto3" ) @@ -39,7 +40,12 @@ type Config struct { DialFunc DialFunc // e.g. 
net.Dialer.DialContext LookupFunc LookupFunc // e.g. net.Resolver.LookupHost BuildFrontend BuildFrontendFunc - RuntimeParams map[string]string // Run-time parameters to set on connection as session default values (e.g. search_path or application_name) + + // BuildContextWatcherHandler is called to create a ContextWatcherHandler for a connection. The handler is called + // when a context passed to a PgConn method is canceled. + BuildContextWatcherHandler func(*PgConn) ctxwatch.Handler + + RuntimeParams map[string]string // Run-time parameters to set on connection as session default values (e.g. search_path or application_name) KerberosSrvName string KerberosSpn string @@ -70,7 +76,7 @@ type Config struct { // ParseConfigOptions contains options that control how a config is built such as GetSSLPassword. type ParseConfigOptions struct { - // GetSSLPassword gets the password to decrypt a SSL client certificate. This is analogous to the the libpq function + // GetSSLPassword gets the password to decrypt a SSL client certificate. This is analogous to the libpq function // PQsetSSLKeyPassHook_OpenSSL. GetSSLPassword GetSSLPasswordFunc } @@ -112,6 +118,14 @@ type FallbackConfig struct { TLSConfig *tls.Config // nil disables TLS } +// connectOneConfig is the configuration for a single attempt to connect to a single host. +type connectOneConfig struct { + network string + address string + originalHostname string // original hostname before resolving + tlsConfig *tls.Config // nil disables TLS +} + // isAbsolutePath checks if the provided value is an absolute path either // beginning with a forward slash (as on Linux-based systems) or with a capital // letter A-Z followed by a colon and a backslash, e.g., "C:\", (as on Windows). @@ -146,11 +160,11 @@ func NetworkAddress(host string, port uint16) (network, address string) { // ParseConfig builds a *Config from connString with similar behavior to the PostgreSQL standard C library libpq. It // uses the same defaults as libpq (e.g. port=5432) and understands most PG* environment variables. ParseConfig closely -// matches the parsing behavior of libpq. connString may either be in URL format or keyword = value format (DSN style). -// See https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING for details. connString also may be -// empty to only read from the environment. If a password is not supplied it will attempt to read the .pgpass file. +// matches the parsing behavior of libpq. connString may either be in URL format or keyword = value format. See +// https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING for details. connString also may be empty +// to only read from the environment. If a password is not supplied it will attempt to read the .pgpass file. 
// -// # Example DSN +// # Example Keyword/Value // user=jack password=secret host=pg.example.com port=5432 dbname=mydb sslmode=verify-ca // // # Example URL @@ -169,7 +183,7 @@ func NetworkAddress(host string, port uint16) (network, address string) { // postgres://jack:secret@foo.example.com:5432,bar.example.com:5432/mydb // // ParseConfig currently recognizes the following environment variable and their parameter key word equivalents passed -// via database URL or DSN: +// via database URL or keyword/value: // // PGHOST // PGPORT @@ -233,16 +247,16 @@ func ParseConfigWithOptions(connString string, options ParseConfigOptions) (*Con connStringSettings := make(map[string]string) if connString != "" { var err error - // connString may be a database URL or a DSN + // connString may be a database URL or in PostgreSQL keyword/value format if strings.HasPrefix(connString, "postgres://") || strings.HasPrefix(connString, "postgresql://") { connStringSettings, err = parseURLSettings(connString) if err != nil { return nil, &ParseConfigError{ConnString: connString, msg: "failed to parse as URL", err: err} } } else { - connStringSettings, err = parseDSNSettings(connString) + connStringSettings, err = parseKeywordValueSettings(connString) if err != nil { - return nil, &ParseConfigError{ConnString: connString, msg: "failed to parse as DSN", err: err} + return nil, &ParseConfigError{ConnString: connString, msg: "failed to parse as keyword/value", err: err} } } } @@ -266,6 +280,9 @@ func ParseConfigWithOptions(connString string, options ParseConfigOptions) (*Con BuildFrontend: func(r io.Reader, w io.Writer) *pgproto3.Frontend { return pgproto3.NewFrontend(r, w) }, + BuildContextWatcherHandler: func(pgConn *PgConn) ctxwatch.Handler { + return &DeadlineContextWatcherHandler{Conn: pgConn.conn} + }, OnPgError: func(_ *PgConn, pgErr *PgError) bool { // we want to automatically close any fatal errors if strings.EqualFold(pgErr.Severity, "FATAL") { @@ -517,7 +534,7 @@ func isIPOnly(host string) bool { var asciiSpace = [256]uint8{'\t': 1, '\n': 1, '\v': 1, '\f': 1, '\r': 1, ' ': 1} -func parseDSNSettings(s string) (map[string]string, error) { +func parseKeywordValueSettings(s string) (map[string]string, error) { settings := make(map[string]string) nameMap := map[string]string{ @@ -528,7 +545,7 @@ func parseDSNSettings(s string) (map[string]string, error) { var key, val string eqIdx := strings.IndexRune(s, '=') if eqIdx < 0 { - return nil, errors.New("invalid dsn") + return nil, errors.New("invalid keyword/value") } key = strings.Trim(s[:eqIdx], " \t\n\r\v\f") @@ -580,7 +597,7 @@ func parseDSNSettings(s string) (map[string]string, error) { } if key == "" { - return nil, errors.New("invalid dsn") + return nil, errors.New("invalid keyword/value") } settings[key] = val @@ -800,7 +817,8 @@ func parsePort(s string) (uint16, error) { } func makeDefaultDialer() *net.Dialer { - return &net.Dialer{KeepAlive: 5 * time.Minute} + // rely on GOLANG KeepAlive settings + return &net.Dialer{} } func makeDefaultResolver() *net.Resolver { diff --git a/vendor/github.com/jackc/pgx/v5/pgconn/internal/ctxwatch/context_watcher.go b/vendor/github.com/jackc/pgx/v5/pgconn/ctxwatch/context_watcher.go similarity index 71% rename from vendor/github.com/jackc/pgx/v5/pgconn/internal/ctxwatch/context_watcher.go rename to vendor/github.com/jackc/pgx/v5/pgconn/ctxwatch/context_watcher.go index b39cb3ee57a..db8884eb88c 100644 --- a/vendor/github.com/jackc/pgx/v5/pgconn/internal/ctxwatch/context_watcher.go +++ 
b/vendor/github.com/jackc/pgx/v5/pgconn/ctxwatch/context_watcher.go @@ -8,9 +8,8 @@ import ( // ContextWatcher watches a context and performs an action when the context is canceled. It can watch one context at a // time. type ContextWatcher struct { - onCancel func() - onUnwatchAfterCancel func() - unwatchChan chan struct{} + handler Handler + unwatchChan chan struct{} lock sync.Mutex watchInProgress bool @@ -20,11 +19,10 @@ type ContextWatcher struct { // NewContextWatcher returns a ContextWatcher. onCancel will be called when a watched context is canceled. // OnUnwatchAfterCancel will be called when Unwatch is called and the watched context had already been canceled and // onCancel called. -func NewContextWatcher(onCancel func(), onUnwatchAfterCancel func()) *ContextWatcher { +func NewContextWatcher(handler Handler) *ContextWatcher { cw := &ContextWatcher{ - onCancel: onCancel, - onUnwatchAfterCancel: onUnwatchAfterCancel, - unwatchChan: make(chan struct{}), + handler: handler, + unwatchChan: make(chan struct{}), } return cw @@ -46,7 +44,7 @@ func (cw *ContextWatcher) Watch(ctx context.Context) { go func() { select { case <-ctx.Done(): - cw.onCancel() + cw.handler.HandleCancel(ctx) cw.onCancelWasCalled = true <-cw.unwatchChan case <-cw.unwatchChan: @@ -66,8 +64,17 @@ func (cw *ContextWatcher) Unwatch() { if cw.watchInProgress { cw.unwatchChan <- struct{}{} if cw.onCancelWasCalled { - cw.onUnwatchAfterCancel() + cw.handler.HandleUnwatchAfterCancel() } cw.watchInProgress = false } } + +type Handler interface { + // HandleCancel is called when the context that a ContextWatcher is currently watching is canceled. canceledCtx is the + // context that was canceled. + HandleCancel(canceledCtx context.Context) + + // HandleUnwatchAfterCancel is called when a ContextWatcher that called HandleCancel on this Handler is unwatched. + HandleUnwatchAfterCancel() +} diff --git a/vendor/github.com/jackc/pgx/v5/pgconn/doc.go b/vendor/github.com/jackc/pgx/v5/pgconn/doc.go index e3242cf4e15..701375019c8 100644 --- a/vendor/github.com/jackc/pgx/v5/pgconn/doc.go +++ b/vendor/github.com/jackc/pgx/v5/pgconn/doc.go @@ -5,8 +5,8 @@ nearly the same level is the C library libpq. Establishing a Connection -Use Connect to establish a connection. It accepts a connection string in URL or DSN and will read the environment for -libpq style environment variables. +Use Connect to establish a connection. It accepts a connection string in URL or keyword/value format and will read the +environment for libpq style environment variables. Executing a Query @@ -20,13 +20,17 @@ result. The ReadAll method reads all query results into memory. Pipeline Mode -Pipeline mode allows sending queries without having read the results of previously sent queries. It allows -control of exactly how many and when network round trips occur. +Pipeline mode allows sending queries without having read the results of previously sent queries. It allows control of +exactly how many and when network round trips occur. Context Support -All potentially blocking operations take a context.Context. If a context is canceled while the method is in progress the -method immediately returns. In most circumstances, this will close the underlying connection. +All potentially blocking operations take a context.Context. The default behavior when a context is canceled is for the +method to immediately return. In most circumstances, this will also close the underlying connection. 
This behavior can +be customized by using BuildContextWatcherHandler on the Config to create a ctxwatch.Handler with different behavior. +This can be especially useful when queries that are frequently canceled and the overhead of creating new connections is +a problem. DeadlineContextWatcherHandler and CancelRequestContextWatcherHandler can be used to introduce a delay before +interrupting the query in such a way as to close the connection. The CancelRequest method may be used to request the PostgreSQL server cancel an in-progress query without forcing the client to abort. diff --git a/vendor/github.com/jackc/pgx/v5/pgconn/errors.go b/vendor/github.com/jackc/pgx/v5/pgconn/errors.go index c315739a9a2..ec4a6d47c94 100644 --- a/vendor/github.com/jackc/pgx/v5/pgconn/errors.go +++ b/vendor/github.com/jackc/pgx/v5/pgconn/errors.go @@ -12,13 +12,14 @@ import ( // SafeToRetry checks if the err is guaranteed to have occurred before sending any data to the server. func SafeToRetry(err error) bool { - if e, ok := err.(interface{ SafeToRetry() bool }); ok { - return e.SafeToRetry() + var retryableErr interface{ SafeToRetry() bool } + if errors.As(err, &retryableErr) { + return retryableErr.SafeToRetry() } return false } -// Timeout checks if err was was caused by a timeout. To be specific, it is true if err was caused within pgconn by a +// Timeout checks if err was caused by a timeout. To be specific, it is true if err was caused within pgconn by a // context.DeadlineExceeded or an implementer of net.Error where Timeout() is true. func Timeout(err error) bool { var timeoutErr *errTimeout @@ -29,23 +30,24 @@ func Timeout(err error) bool { // http://www.postgresql.org/docs/11/static/protocol-error-fields.html for // detailed field description. type PgError struct { - Severity string - Code string - Message string - Detail string - Hint string - Position int32 - InternalPosition int32 - InternalQuery string - Where string - SchemaName string - TableName string - ColumnName string - DataTypeName string - ConstraintName string - File string - Line int32 - Routine string + Severity string + SeverityUnlocalized string + Code string + Message string + Detail string + Hint string + Position int32 + InternalPosition int32 + InternalQuery string + Where string + SchemaName string + TableName string + ColumnName string + DataTypeName string + ConstraintName string + File string + Line int32 + Routine string } func (pe *PgError) Error() string { @@ -60,23 +62,37 @@ func (pe *PgError) SQLState() string { // ConnectError is the error returned when a connection attempt fails. type ConnectError struct { Config *Config // The configuration that was used in the connection attempt. 
- msg string err error } func (e *ConnectError) Error() string { - sb := &strings.Builder{} - fmt.Fprintf(sb, "failed to connect to `host=%s user=%s database=%s`: %s", e.Config.Host, e.Config.User, e.Config.Database, e.msg) - if e.err != nil { - fmt.Fprintf(sb, " (%s)", e.err.Error()) + prefix := fmt.Sprintf("failed to connect to `user=%s database=%s`:", e.Config.User, e.Config.Database) + details := e.err.Error() + if strings.Contains(details, "\n") { + return prefix + "\n\t" + strings.ReplaceAll(details, "\n", "\n\t") + } else { + return prefix + " " + details } - return sb.String() } func (e *ConnectError) Unwrap() error { return e.err } +type perDialConnectError struct { + address string + originalHostname string + err error +} + +func (e *perDialConnectError) Error() string { + return fmt.Sprintf("%s (%s): %s", e.address, e.originalHostname, e.err.Error()) +} + +func (e *perDialConnectError) Unwrap() error { + return e.err +} + type connLockError struct { status string } @@ -195,10 +211,10 @@ func redactPW(connString string) string { return redactURL(u) } } - quotedDSN := regexp.MustCompile(`password='[^']*'`) - connString = quotedDSN.ReplaceAllLiteralString(connString, "password=xxxxx") - plainDSN := regexp.MustCompile(`password=[^ ]*`) - connString = plainDSN.ReplaceAllLiteralString(connString, "password=xxxxx") + quotedKV := regexp.MustCompile(`password='[^']*'`) + connString = quotedKV.ReplaceAllLiteralString(connString, "password=xxxxx") + plainKV := regexp.MustCompile(`password=[^ ]*`) + connString = plainKV.ReplaceAllLiteralString(connString, "password=xxxxx") brokenURL := regexp.MustCompile(`:[^:@]+?@`) connString = brokenURL.ReplaceAllLiteralString(connString, ":xxxxxx@") return connString diff --git a/vendor/github.com/jackc/pgx/v5/pgconn/pgconn.go b/vendor/github.com/jackc/pgx/v5/pgconn/pgconn.go index 0bf03f335a0..7efb522a493 100644 --- a/vendor/github.com/jackc/pgx/v5/pgconn/pgconn.go +++ b/vendor/github.com/jackc/pgx/v5/pgconn/pgconn.go @@ -18,8 +18,8 @@ import ( "github.com/jackc/pgx/v5/internal/iobufpool" "github.com/jackc/pgx/v5/internal/pgio" + "github.com/jackc/pgx/v5/pgconn/ctxwatch" "github.com/jackc/pgx/v5/pgconn/internal/bgreader" - "github.com/jackc/pgx/v5/pgconn/internal/ctxwatch" "github.com/jackc/pgx/v5/pgproto3" ) @@ -82,6 +82,8 @@ type PgConn struct { slowWriteTimer *time.Timer bgReaderStarted chan struct{} + customData map[string]any + config *Config status byte // One of connStatus* constants @@ -103,8 +105,9 @@ type PgConn struct { cleanupDone chan struct{} } -// Connect establishes a connection to a PostgreSQL server using the environment and connString (in URL or DSN format) -// to provide configuration. See documentation for [ParseConfig] for details. ctx can be used to cancel a connect attempt. +// Connect establishes a connection to a PostgreSQL server using the environment and connString (in URL or keyword/value +// format) to provide configuration. See documentation for [ParseConfig] for details. ctx can be used to cancel a +// connect attempt. func Connect(ctx context.Context, connString string) (*PgConn, error) { config, err := ParseConfig(connString) if err != nil { @@ -114,9 +117,9 @@ func Connect(ctx context.Context, connString string) (*PgConn, error) { return ConnectConfig(ctx, config) } -// Connect establishes a connection to a PostgreSQL server using the environment and connString (in URL or DSN format) -// and ParseConfigOptions to provide additional configuration. See documentation for [ParseConfig] for details. 
ctx can be -// used to cancel a connect attempt. +// Connect establishes a connection to a PostgreSQL server using the environment and connString (in URL or keyword/value +// format) and ParseConfigOptions to provide additional configuration. See documentation for [ParseConfig] for details. +// ctx can be used to cancel a connect attempt. func ConnectWithOptions(ctx context.Context, connString string, parseConfigOptions ParseConfigOptions) (*PgConn, error) { config, err := ParseConfigWithOptions(connString, parseConfigOptions) if err != nil { @@ -131,113 +134,77 @@ func ConnectWithOptions(ctx context.Context, connString string, parseConfigOptio // // If config.Fallbacks are present they will sequentially be tried in case of error establishing network connection. An // authentication error will terminate the chain of attempts (like libpq: -// https://www.postgresql.org/docs/11/libpq-connect.html#LIBPQ-MULTIPLE-HOSTS) and be returned as the error. Otherwise, -// if all attempts fail the last error is returned. -func ConnectConfig(octx context.Context, config *Config) (pgConn *PgConn, err error) { +// https://www.postgresql.org/docs/11/libpq-connect.html#LIBPQ-MULTIPLE-HOSTS) and be returned as the error. +func ConnectConfig(ctx context.Context, config *Config) (*PgConn, error) { // Default values are set in ParseConfig. Enforce initial creation by ParseConfig rather than setting defaults from // zero values. if !config.createdByParseConfig { panic("config must be created by ParseConfig") } - // Simplify usage by treating primary config and fallbacks the same. - fallbackConfigs := []*FallbackConfig{ - { - Host: config.Host, - Port: config.Port, - TLSConfig: config.TLSConfig, - }, - } - fallbackConfigs = append(fallbackConfigs, config.Fallbacks...) - ctx := octx - fallbackConfigs, err = expandWithIPs(ctx, config.LookupFunc, fallbackConfigs) - if err != nil { - return nil, &ConnectError{Config: config, msg: "hostname resolving error", err: err} - } + var allErrors []error - if len(fallbackConfigs) == 0 { - return nil, &ConnectError{Config: config, msg: "hostname resolving error", err: errors.New("ip addr wasn't found")} - } - - foundBestServer := false - var fallbackConfig *FallbackConfig - for i, fc := range fallbackConfigs { - // ConnectTimeout restricts the whole connection process. 
- if config.ConnectTimeout != 0 { - // create new context first time or when previous host was different - if i == 0 || (fallbackConfigs[i].Host != fallbackConfigs[i-1].Host) { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(octx, config.ConnectTimeout) - defer cancel() - } - } else { - ctx = octx - } - pgConn, err = connect(ctx, config, fc, false) - if err == nil { - foundBestServer = true - break - } else if pgerr, ok := err.(*PgError); ok { - err = &ConnectError{Config: config, msg: "server error", err: pgerr} - const ERRCODE_INVALID_PASSWORD = "28P01" // wrong password - const ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION = "28000" // wrong password or bad pg_hba.conf settings - const ERRCODE_INVALID_CATALOG_NAME = "3D000" // db does not exist - const ERRCODE_INSUFFICIENT_PRIVILEGE = "42501" // missing connect privilege - if pgerr.Code == ERRCODE_INVALID_PASSWORD || - pgerr.Code == ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION && fc.TLSConfig != nil || - pgerr.Code == ERRCODE_INVALID_CATALOG_NAME || - pgerr.Code == ERRCODE_INSUFFICIENT_PRIVILEGE { - break - } - } else if cerr, ok := err.(*ConnectError); ok { - if _, ok := cerr.err.(*NotPreferredError); ok { - fallbackConfig = fc - } - } + connectConfigs, errs := buildConnectOneConfigs(ctx, config) + if len(errs) > 0 { + allErrors = append(allErrors, errs...) } - if !foundBestServer && fallbackConfig != nil { - pgConn, err = connect(ctx, config, fallbackConfig, true) - if pgerr, ok := err.(*PgError); ok { - err = &ConnectError{Config: config, msg: "server error", err: pgerr} - } + if len(connectConfigs) == 0 { + return nil, &ConnectError{Config: config, err: fmt.Errorf("hostname resolving error: %w", errors.Join(allErrors...))} } - if err != nil { - return nil, err // no need to wrap in connectError because it will already be wrapped in all cases except PgError + pgConn, errs := connectPreferred(ctx, config, connectConfigs) + if len(errs) > 0 { + allErrors = append(allErrors, errs...) + return nil, &ConnectError{Config: config, err: errors.Join(allErrors...)} } if config.AfterConnect != nil { err := config.AfterConnect(ctx, pgConn) if err != nil { pgConn.conn.Close() - return nil, &ConnectError{Config: config, msg: "AfterConnect error", err: err} + return nil, &ConnectError{Config: config, err: fmt.Errorf("AfterConnect error: %w", err)} } } return pgConn, nil } -func expandWithIPs(ctx context.Context, lookupFn LookupFunc, fallbacks []*FallbackConfig) ([]*FallbackConfig, error) { - var configs []*FallbackConfig +// buildConnectOneConfigs resolves hostnames and builds a list of connectOneConfigs to try connecting to. It returns a +// slice of successfully resolved connectOneConfigs and a slice of errors. It is possible for both slices to contain +// values if some hosts were successfully resolved and others were not. +func buildConnectOneConfigs(ctx context.Context, config *Config) ([]*connectOneConfig, []error) { + // Simplify usage by treating primary config and fallbacks the same. + fallbackConfigs := []*FallbackConfig{ + { + Host: config.Host, + Port: config.Port, + TLSConfig: config.TLSConfig, + }, + } + fallbackConfigs = append(fallbackConfigs, config.Fallbacks...) 
+ + var configs []*connectOneConfig - var lookupErrors []error + var allErrors []error - for _, fb := range fallbacks { + for _, fb := range fallbackConfigs { // skip resolve for unix sockets if isAbsolutePath(fb.Host) { - configs = append(configs, &FallbackConfig{ - Host: fb.Host, - Port: fb.Port, - TLSConfig: fb.TLSConfig, + network, address := NetworkAddress(fb.Host, fb.Port) + configs = append(configs, &connectOneConfig{ + network: network, + address: address, + originalHostname: fb.Host, + tlsConfig: fb.TLSConfig, }) continue } - ips, err := lookupFn(ctx, fb.Host) + ips, err := config.LookupFunc(ctx, fb.Host) if err != nil { - lookupErrors = append(lookupErrors, err) + allErrors = append(allErrors, err) continue } @@ -246,63 +213,126 @@ func expandWithIPs(ctx context.Context, lookupFn LookupFunc, fallbacks []*Fallba if err == nil { port, err := strconv.ParseUint(splitPort, 10, 16) if err != nil { - return nil, fmt.Errorf("error parsing port (%s) from lookup: %w", splitPort, err) + return nil, []error{fmt.Errorf("error parsing port (%s) from lookup: %w", splitPort, err)} } - configs = append(configs, &FallbackConfig{ - Host: splitIP, - Port: uint16(port), - TLSConfig: fb.TLSConfig, + network, address := NetworkAddress(splitIP, uint16(port)) + configs = append(configs, &connectOneConfig{ + network: network, + address: address, + originalHostname: fb.Host, + tlsConfig: fb.TLSConfig, }) } else { - configs = append(configs, &FallbackConfig{ - Host: ip, - Port: fb.Port, - TLSConfig: fb.TLSConfig, + network, address := NetworkAddress(ip, fb.Port) + configs = append(configs, &connectOneConfig{ + network: network, + address: address, + originalHostname: fb.Host, + tlsConfig: fb.TLSConfig, }) } } } - // See https://github.com/jackc/pgx/issues/1464. When Go 1.20 can be used in pgx consider using errors.Join so all - // errors are reported. - if len(configs) == 0 && len(lookupErrors) > 0 { - return nil, lookupErrors[0] + return configs, allErrors +} + +// connectPreferred attempts to connect to the preferred host from connectOneConfigs. The connections are attempted in +// order. If a connection is successful it is returned. If no connection is successful then all errors are returned. If +// a connection attempt returns a [NotPreferredError], then that host will be used if no other hosts are successful. +func connectPreferred(ctx context.Context, config *Config, connectOneConfigs []*connectOneConfig) (*PgConn, []error) { + octx := ctx + var allErrors []error + + var fallbackConnectOneConfig *connectOneConfig + for i, c := range connectOneConfigs { + // ConnectTimeout restricts the whole connection process. 
+ if config.ConnectTimeout != 0 { + // create new context first time or when previous host was different + if i == 0 || (connectOneConfigs[i].address != connectOneConfigs[i-1].address) { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(octx, config.ConnectTimeout) + defer cancel() + } + } else { + ctx = octx + } + + pgConn, err := connectOne(ctx, config, c, false) + if pgConn != nil { + return pgConn, nil + } + + allErrors = append(allErrors, err) + + var pgErr *PgError + if errors.As(err, &pgErr) { + const ERRCODE_INVALID_PASSWORD = "28P01" // wrong password + const ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION = "28000" // wrong password or bad pg_hba.conf settings + const ERRCODE_INVALID_CATALOG_NAME = "3D000" // db does not exist + const ERRCODE_INSUFFICIENT_PRIVILEGE = "42501" // missing connect privilege + if pgErr.Code == ERRCODE_INVALID_PASSWORD || + pgErr.Code == ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION && c.tlsConfig != nil || + pgErr.Code == ERRCODE_INVALID_CATALOG_NAME || + pgErr.Code == ERRCODE_INSUFFICIENT_PRIVILEGE { + return nil, allErrors + } + } + + var npErr *NotPreferredError + if errors.As(err, &npErr) { + fallbackConnectOneConfig = c + } + } + + if fallbackConnectOneConfig != nil { + pgConn, err := connectOne(ctx, config, fallbackConnectOneConfig, true) + if err == nil { + return pgConn, nil + } + allErrors = append(allErrors, err) } - return configs, nil + return nil, allErrors } -func connect(ctx context.Context, config *Config, fallbackConfig *FallbackConfig, +// connectOne makes one connection attempt to a single host. +func connectOne(ctx context.Context, config *Config, connectConfig *connectOneConfig, ignoreNotPreferredErr bool, ) (*PgConn, error) { pgConn := new(PgConn) pgConn.config = config pgConn.cleanupDone = make(chan struct{}) + pgConn.customData = make(map[string]any) var err error - network, address := NetworkAddress(fallbackConfig.Host, fallbackConfig.Port) - netConn, err := config.DialFunc(ctx, network, address) - if err != nil { - return nil, &ConnectError{Config: config, msg: "dial error", err: normalizeTimeoutError(ctx, err)} + + newPerDialConnectError := func(msg string, err error) *perDialConnectError { + err = normalizeTimeoutError(ctx, err) + e := &perDialConnectError{address: connectConfig.address, originalHostname: connectConfig.originalHostname, err: fmt.Errorf("%s: %w", msg, err)} + return e } - pgConn.conn = netConn - pgConn.contextWatcher = newContextWatcher(netConn) - pgConn.contextWatcher.Watch(ctx) + pgConn.conn, err = config.DialFunc(ctx, connectConfig.network, connectConfig.address) + if err != nil { + return nil, newPerDialConnectError("dial error", err) + } - if fallbackConfig.TLSConfig != nil { - nbTLSConn, err := startTLS(netConn, fallbackConfig.TLSConfig) + if connectConfig.tlsConfig != nil { + pgConn.contextWatcher = ctxwatch.NewContextWatcher(&DeadlineContextWatcherHandler{Conn: pgConn.conn}) + pgConn.contextWatcher.Watch(ctx) + tlsConn, err := startTLS(pgConn.conn, connectConfig.tlsConfig) pgConn.contextWatcher.Unwatch() // Always unwatch `netConn` after TLS. 
if err != nil { - netConn.Close() - return nil, &ConnectError{Config: config, msg: "tls error", err: normalizeTimeoutError(ctx, err)} + pgConn.conn.Close() + return nil, newPerDialConnectError("tls error", err) } - pgConn.conn = nbTLSConn - pgConn.contextWatcher = newContextWatcher(nbTLSConn) - pgConn.contextWatcher.Watch(ctx) + pgConn.conn = tlsConn } + pgConn.contextWatcher = ctxwatch.NewContextWatcher(config.BuildContextWatcherHandler(pgConn)) + pgConn.contextWatcher.Watch(ctx) defer pgConn.contextWatcher.Unwatch() pgConn.parameterStatuses = make(map[string]string) @@ -336,7 +366,7 @@ func connect(ctx context.Context, config *Config, fallbackConfig *FallbackConfig pgConn.frontend.Send(&startupMsg) if err := pgConn.flushWithPotentialWriteReadDeadlock(); err != nil { pgConn.conn.Close() - return nil, &ConnectError{Config: config, msg: "failed to write startup message", err: normalizeTimeoutError(ctx, err)} + return nil, newPerDialConnectError("failed to write startup message", err) } for { @@ -344,9 +374,9 @@ func connect(ctx context.Context, config *Config, fallbackConfig *FallbackConfig if err != nil { pgConn.conn.Close() if err, ok := err.(*PgError); ok { - return nil, err + return nil, newPerDialConnectError("server error", err) } - return nil, &ConnectError{Config: config, msg: "failed to receive message", err: normalizeTimeoutError(ctx, err)} + return nil, newPerDialConnectError("failed to receive message", err) } switch msg := msg.(type) { @@ -359,26 +389,26 @@ func connect(ctx context.Context, config *Config, fallbackConfig *FallbackConfig err = pgConn.txPasswordMessage(pgConn.config.Password) if err != nil { pgConn.conn.Close() - return nil, &ConnectError{Config: config, msg: "failed to write password message", err: err} + return nil, newPerDialConnectError("failed to write password message", err) } case *pgproto3.AuthenticationMD5Password: digestedPassword := "md5" + hexMD5(hexMD5(pgConn.config.Password+pgConn.config.User)+string(msg.Salt[:])) err = pgConn.txPasswordMessage(digestedPassword) if err != nil { pgConn.conn.Close() - return nil, &ConnectError{Config: config, msg: "failed to write password message", err: err} + return nil, newPerDialConnectError("failed to write password message", err) } case *pgproto3.AuthenticationSASL: err = pgConn.scramAuth(msg.AuthMechanisms) if err != nil { pgConn.conn.Close() - return nil, &ConnectError{Config: config, msg: "failed SASL auth", err: err} + return nil, newPerDialConnectError("failed SASL auth", err) } case *pgproto3.AuthenticationGSS: err = pgConn.gssAuth() if err != nil { pgConn.conn.Close() - return nil, &ConnectError{Config: config, msg: "failed GSS auth", err: err} + return nil, newPerDialConnectError("failed GSS auth", err) } case *pgproto3.ReadyForQuery: pgConn.status = connStatusIdle @@ -396,7 +426,7 @@ func connect(ctx context.Context, config *Config, fallbackConfig *FallbackConfig return pgConn, nil } pgConn.conn.Close() - return nil, &ConnectError{Config: config, msg: "ValidateConnect failed", err: err} + return nil, newPerDialConnectError("ValidateConnect failed", err) } } return pgConn, nil @@ -404,21 +434,14 @@ func connect(ctx context.Context, config *Config, fallbackConfig *FallbackConfig // handled by ReceiveMessage case *pgproto3.ErrorResponse: pgConn.conn.Close() - return nil, ErrorResponseToPgError(msg) + return nil, newPerDialConnectError("server error", ErrorResponseToPgError(msg)) default: pgConn.conn.Close() - return nil, &ConnectError{Config: config, msg: "received unexpected message", err: err} + return 
nil, newPerDialConnectError("received unexpected message", err) } } } -func newContextWatcher(conn net.Conn) *ctxwatch.ContextWatcher { - return ctxwatch.NewContextWatcher( - func() { conn.SetDeadline(time.Date(1, 1, 1, 1, 1, 1, 1, time.UTC)) }, - func() { conn.SetDeadline(time.Time{}) }, - ) -} - func startTLS(conn net.Conn, tlsConfig *tls.Config) (net.Conn, error) { err := binary.Write(conn, binary.BigEndian, []int32{8, 80877103}) if err != nil { @@ -928,23 +951,24 @@ func (pgConn *PgConn) Deallocate(ctx context.Context, name string) error { // ErrorResponseToPgError converts a wire protocol error message to a *PgError. func ErrorResponseToPgError(msg *pgproto3.ErrorResponse) *PgError { return &PgError{ - Severity: msg.Severity, - Code: string(msg.Code), - Message: string(msg.Message), - Detail: string(msg.Detail), - Hint: msg.Hint, - Position: msg.Position, - InternalPosition: msg.InternalPosition, - InternalQuery: string(msg.InternalQuery), - Where: string(msg.Where), - SchemaName: string(msg.SchemaName), - TableName: string(msg.TableName), - ColumnName: string(msg.ColumnName), - DataTypeName: string(msg.DataTypeName), - ConstraintName: msg.ConstraintName, - File: string(msg.File), - Line: msg.Line, - Routine: string(msg.Routine), + Severity: msg.Severity, + SeverityUnlocalized: msg.SeverityUnlocalized, + Code: string(msg.Code), + Message: string(msg.Message), + Detail: string(msg.Detail), + Hint: msg.Hint, + Position: msg.Position, + InternalPosition: msg.InternalPosition, + InternalQuery: string(msg.InternalQuery), + Where: string(msg.Where), + SchemaName: string(msg.SchemaName), + TableName: string(msg.TableName), + ColumnName: string(msg.ColumnName), + DataTypeName: string(msg.DataTypeName), + ConstraintName: msg.ConstraintName, + File: string(msg.File), + Line: msg.Line, + Routine: string(msg.Routine), } } @@ -987,10 +1011,7 @@ func (pgConn *PgConn) CancelRequest(ctx context.Context) error { defer cancelConn.Close() if ctx != context.Background() { - contextWatcher := ctxwatch.NewContextWatcher( - func() { cancelConn.SetDeadline(time.Date(1, 1, 1, 1, 1, 1, 1, time.UTC)) }, - func() { cancelConn.SetDeadline(time.Time{}) }, - ) + contextWatcher := ctxwatch.NewContextWatcher(&DeadlineContextWatcherHandler{Conn: cancelConn}) contextWatcher.Watch(ctx) defer contextWatcher.Unwatch() } @@ -1523,8 +1544,10 @@ func (rr *ResultReader) Read() *Result { values := rr.Values() row := make([][]byte, len(values)) for i := range row { - row[i] = make([]byte, len(values[i])) - copy(row[i], values[i]) + if values[i] != nil { + row[i] = make([]byte, len(values[i])) + copy(row[i], values[i]) + } } br.Rows = append(br.Rows, row) } @@ -1879,6 +1902,11 @@ func (pgConn *PgConn) SyncConn(ctx context.Context) error { return errors.New("SyncConn: conn never synchronized") } +// CustomData returns a map that can be used to associate custom data with the connection. +func (pgConn *PgConn) CustomData() map[string]any { + return pgConn.customData +} + // HijackedConn is the result of hijacking a connection. // // Due to the necessary exposure of internal implementation details, it is not covered by the semantic versioning @@ -1891,6 +1919,7 @@ type HijackedConn struct { TxStatus byte Frontend *pgproto3.Frontend Config *Config + CustomData map[string]any } // Hijack extracts the internal connection data. pgConn must be in an idle state. 
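
The new CustomData map is carried through Hijack and Construct, so state attached to a connection survives being handed off. A minimal sketch, assuming DATABASE_URL points at a reachable server; the "appInstance" key is illustrative:

    package main

    import (
        "context"
        "fmt"
        "log"
        "os"

        "github.com/jackc/pgx/v5/pgconn"
    )

    func main() {
        ctx := context.Background()

        pgConn, err := pgconn.Connect(ctx, os.Getenv("DATABASE_URL"))
        if err != nil {
            log.Fatal(err)
        }
        pgConn.CustomData()["appInstance"] = "worker-1"

        hc, err := pgConn.Hijack() // the map travels in HijackedConn.CustomData
        if err != nil {
            log.Fatal(err)
        }
        restored, err := pgconn.Construct(hc)
        if err != nil {
            log.Fatal(err)
        }
        defer restored.Close(ctx)

        fmt.Println(restored.CustomData()["appInstance"]) // worker-1
    }
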
SyncConn should be called immediately @@ -1913,6 +1942,7 @@ func (pgConn *PgConn) Hijack() (*HijackedConn, error) { TxStatus: pgConn.txStatus, Frontend: pgConn.frontend, Config: pgConn.config, + CustomData: pgConn.customData, }, nil } @@ -1932,13 +1962,14 @@ func Construct(hc *HijackedConn) (*PgConn, error) { txStatus: hc.TxStatus, frontend: hc.Frontend, config: hc.Config, + customData: hc.CustomData, status: connStatusIdle, cleanupDone: make(chan struct{}), } - pgConn.contextWatcher = newContextWatcher(pgConn.conn) + pgConn.contextWatcher = ctxwatch.NewContextWatcher(hc.Config.BuildContextWatcherHandler(pgConn)) pgConn.bgReader = bgreader.New(pgConn.conn) pgConn.slowWriteTimer = time.AfterFunc(time.Duration(math.MaxInt64), func() { @@ -2245,3 +2276,71 @@ func (p *Pipeline) Close() error { return p.err } + +// DeadlineContextWatcherHandler handles canceled contexts by setting a deadline on a net.Conn. +type DeadlineContextWatcherHandler struct { + Conn net.Conn + + // DeadlineDelay is the delay to set on the deadline set on net.Conn when the context is canceled. + DeadlineDelay time.Duration +} + +func (h *DeadlineContextWatcherHandler) HandleCancel(ctx context.Context) { + h.Conn.SetDeadline(time.Now().Add(h.DeadlineDelay)) +} + +func (h *DeadlineContextWatcherHandler) HandleUnwatchAfterCancel() { + h.Conn.SetDeadline(time.Time{}) +} + +// CancelRequestContextWatcherHandler handles canceled contexts by sending a cancel request to the server. It also sets +// a deadline on a net.Conn as a fallback. +type CancelRequestContextWatcherHandler struct { + Conn *PgConn + + // CancelRequestDelay is the delay before sending the cancel request to the server. + CancelRequestDelay time.Duration + + // DeadlineDelay is the delay to set on the deadline set on net.Conn when the context is canceled. + DeadlineDelay time.Duration + + cancelFinishedChan chan struct{} + handleUnwatchAfterCancelCalled func() +} + +func (h *CancelRequestContextWatcherHandler) HandleCancel(context.Context) { + h.cancelFinishedChan = make(chan struct{}) + var handleUnwatchedAfterCancelCalledCtx context.Context + handleUnwatchedAfterCancelCalledCtx, h.handleUnwatchAfterCancelCalled = context.WithCancel(context.Background()) + + deadline := time.Now().Add(h.DeadlineDelay) + h.Conn.conn.SetDeadline(deadline) + + go func() { + defer close(h.cancelFinishedChan) + + select { + case <-handleUnwatchedAfterCancelCalledCtx.Done(): + return + case <-time.After(h.CancelRequestDelay): + } + + cancelRequestCtx, cancel := context.WithDeadline(handleUnwatchedAfterCancelCalledCtx, deadline) + defer cancel() + h.Conn.CancelRequest(cancelRequestCtx) + + // CancelRequest is inherently racy. Even though the cancel request has been received by the server at this point, + // it hasn't necessarily been delivered to the other connection. If we immediately return and the connection is + // immediately used then it is possible the CancelRequest will actually cancel our next query. The + // TestCancelRequestContextWatcherHandler Stress test can produce this error without the sleep below. The sleep time + // is arbitrary, but should be sufficient to prevent this error case. 
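
The two context-watcher handlers introduced here are plain exported values with two methods, so their effect on a connection can be shown directly. A minimal sketch using a throwaway net.Pipe connection; the two-second delay is arbitrary:

    package main

    import (
        "context"
        "net"
        "time"

        "github.com/jackc/pgx/v5/pgconn"
    )

    func main() {
        // net.Pipe gives us a disposable net.Conn; any established connection behaves the same.
        client, server := net.Pipe()
        defer client.Close()
        defer server.Close()

        h := &pgconn.DeadlineContextWatcherHandler{
            Conn:          client,
            DeadlineDelay: 2 * time.Second, // let in-flight reads run 2s past cancellation
        }

        ctx, cancel := context.WithCancel(context.Background())
        cancel() // pretend the watched context was canceled

        h.HandleCancel(ctx)          // sets the conn's deadline to now + DeadlineDelay
        h.HandleUnwatchAfterCancel() // clears the deadline again
    }
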
+ time.Sleep(100 * time.Millisecond) + }() +} + +func (h *CancelRequestContextWatcherHandler) HandleUnwatchAfterCancel() { + h.handleUnwatchAfterCancelCalled() + <-h.cancelFinishedChan + + h.Conn.conn.SetDeadline(time.Time{}) +} diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/pgproto3.go b/vendor/github.com/jackc/pgx/v5/pgproto3/pgproto3.go index 480abfc06ac..128f97f871f 100644 --- a/vendor/github.com/jackc/pgx/v5/pgproto3/pgproto3.go +++ b/vendor/github.com/jackc/pgx/v5/pgproto3/pgproto3.go @@ -99,7 +99,7 @@ func getValueFromJSON(v map[string]string) ([]byte, error) { return nil, errors.New("unknown protocol representation") } -// beginMessage begines a new message of type t. It appends the message type and a placeholder for the message length to +// beginMessage begins a new message of type t. It appends the message type and a placeholder for the message length to // dst. It returns the new buffer and the position of the message length placeholder. func beginMessage(dst []byte, t byte) ([]byte, int) { dst = append(dst, t) diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/array_codec.go b/vendor/github.com/jackc/pgx/v5/pgtype/array_codec.go index c1863b32abd..bf5f6989ac0 100644 --- a/vendor/github.com/jackc/pgx/v5/pgtype/array_codec.go +++ b/vendor/github.com/jackc/pgx/v5/pgtype/array_codec.go @@ -6,7 +6,6 @@ import ( "fmt" "reflect" - "github.com/jackc/pgx/v5/internal/anynil" "github.com/jackc/pgx/v5/internal/pgio" ) @@ -230,7 +229,7 @@ func (c *ArrayCodec) PlanScan(m *Map, oid uint32, format int16, target any) Scan // target / arrayScanner might be a pointer to a nil. If it is create one so we can call ScanIndexType to plan the // scan of the elements. - if anynil.Is(target) { + if isNil, _ := isNilDriverValuer(target); isNil { arrayScanner = reflect.New(reflect.TypeOf(target).Elem()).Interface().(ArraySetter) } diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/doc.go b/vendor/github.com/jackc/pgx/v5/pgtype/doc.go index ec9270acbfd..d56c1dc7010 100644 --- a/vendor/github.com/jackc/pgx/v5/pgtype/doc.go +++ b/vendor/github.com/jackc/pgx/v5/pgtype/doc.go @@ -139,6 +139,16 @@ Compatibility with database/sql pgtype also includes support for custom types implementing the database/sql.Scanner and database/sql/driver.Valuer interfaces. +Encoding Typed Nils + +pgtype encodes untyped and typed nils (e.g. nil and []byte(nil)) to the SQL NULL value without going through the Codec +system. This means that Codecs and other encoding logic do not have to handle nil or *T(nil). + +However, database/sql compatibility requires Value to be called on T(nil) when T implements driver.Valuer. Therefore, +driver.Valuer values are only considered NULL when *T(nil) where driver.Valuer is implemented on T not on *T. See +https://github.com/golang/go/issues/8415 and +https://github.com/golang/go/commit/0ce1d79a6a771f7449ec493b993ed2a720917870. + Child Records pgtype's support for arrays and composite records can be used to load records and their children in a single query. See diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/interval.go b/vendor/github.com/jackc/pgx/v5/pgtype/interval.go index 218209380c2..06703d4dc11 100644 --- a/vendor/github.com/jackc/pgx/v5/pgtype/interval.go +++ b/vendor/github.com/jackc/pgx/v5/pgtype/interval.go @@ -132,22 +132,31 @@ func (encodePlanIntervalCodecText) Encode(value any, buf []byte) (newBuf []byte, if interval.Days != 0 { buf = append(buf, strconv.FormatInt(int64(interval.Days), 10)...) - buf = append(buf, " day "...) + buf = append(buf, " day"...) 
} - absMicroseconds := interval.Microseconds - if absMicroseconds < 0 { - absMicroseconds = -absMicroseconds - buf = append(buf, '-') - } + if interval.Microseconds != 0 { + buf = append(buf, " "...) + + absMicroseconds := interval.Microseconds + if absMicroseconds < 0 { + absMicroseconds = -absMicroseconds + buf = append(buf, '-') + } + + hours := absMicroseconds / microsecondsPerHour + minutes := (absMicroseconds % microsecondsPerHour) / microsecondsPerMinute + seconds := (absMicroseconds % microsecondsPerMinute) / microsecondsPerSecond - hours := absMicroseconds / microsecondsPerHour - minutes := (absMicroseconds % microsecondsPerHour) / microsecondsPerMinute - seconds := (absMicroseconds % microsecondsPerMinute) / microsecondsPerSecond - microseconds := absMicroseconds % microsecondsPerSecond + timeStr := fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds) + buf = append(buf, timeStr...) + + microseconds := absMicroseconds % microsecondsPerSecond + if microseconds != 0 { + buf = append(buf, fmt.Sprintf(".%06d", microseconds)...) + } + } - timeStr := fmt.Sprintf("%02d:%02d:%02d.%06d", hours, minutes, seconds, microseconds) - buf = append(buf, timeStr...) return buf, nil } diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/json.go b/vendor/github.com/jackc/pgx/v5/pgtype/json.go index 99628092a9e..e71dcb9bf9b 100644 --- a/vendor/github.com/jackc/pgx/v5/pgtype/json.go +++ b/vendor/github.com/jackc/pgx/v5/pgtype/json.go @@ -8,17 +8,20 @@ import ( "reflect" ) -type JSONCodec struct{} +type JSONCodec struct { + Marshal func(v any) ([]byte, error) + Unmarshal func(data []byte, v any) error +} -func (JSONCodec) FormatSupported(format int16) bool { +func (*JSONCodec) FormatSupported(format int16) bool { return format == TextFormatCode || format == BinaryFormatCode } -func (JSONCodec) PreferredFormat() int16 { +func (*JSONCodec) PreferredFormat() int16 { return TextFormatCode } -func (c JSONCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan { +func (c *JSONCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan { switch value.(type) { case string: return encodePlanJSONCodecEitherFormatString{} @@ -44,7 +47,9 @@ func (c JSONCodec) PlanEncode(m *Map, oid uint32, format int16, value any) Encod // // https://github.com/jackc/pgx/issues/1681 case json.Marshaler: - return encodePlanJSONCodecEitherFormatMarshal{} + return &encodePlanJSONCodecEitherFormatMarshal{ + marshal: c.Marshal, + } } // Because anything can be marshalled the normal wrapping in Map.PlanScan doesn't get a chance to run. 
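
JSONCodec (and JSONBCodec below) now carry their Marshal and Unmarshal functions as fields instead of calling encoding/json directly, so an alternative JSON implementation can be plugged in per type map. A registration sketch mirroring what pgtype_default.go does later in this patch; the stdlib functions stand in for any compatible pair:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/jackc/pgx/v5/pgtype"
    )

    func main() {
        m := pgtype.NewMap()
        // Any functions with these signatures can replace encoding/json here.
        m.RegisterType(&pgtype.Type{Name: "json", OID: pgtype.JSONOID,
            Codec: &pgtype.JSONCodec{Marshal: json.Marshal, Unmarshal: json.Unmarshal}})
        m.RegisterType(&pgtype.Type{Name: "jsonb", OID: pgtype.JSONBOID,
            Codec: &pgtype.JSONBCodec{Marshal: json.Marshal, Unmarshal: json.Unmarshal}})

        buf, err := m.Encode(pgtype.JSONOID, pgtype.TextFormatCode, map[string]int{"a": 1}, nil)
        fmt.Println(string(buf), err) // {"a":1} <nil>
    }
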
So try the @@ -61,7 +66,9 @@ func (c JSONCodec) PlanEncode(m *Map, oid uint32, format int16, value any) Encod } } - return encodePlanJSONCodecEitherFormatMarshal{} + return &encodePlanJSONCodecEitherFormatMarshal{ + marshal: c.Marshal, + } } type encodePlanJSONCodecEitherFormatString struct{} @@ -96,10 +103,12 @@ func (encodePlanJSONCodecEitherFormatJSONRawMessage) Encode(value any, buf []byt return buf, nil } -type encodePlanJSONCodecEitherFormatMarshal struct{} +type encodePlanJSONCodecEitherFormatMarshal struct { + marshal func(v any) ([]byte, error) +} -func (encodePlanJSONCodecEitherFormatMarshal) Encode(value any, buf []byte) (newBuf []byte, err error) { - jsonBytes, err := json.Marshal(value) +func (e *encodePlanJSONCodecEitherFormatMarshal) Encode(value any, buf []byte) (newBuf []byte, err error) { + jsonBytes, err := e.marshal(value) if err != nil { return nil, err } @@ -108,7 +117,7 @@ func (encodePlanJSONCodecEitherFormatMarshal) Encode(value any, buf []byte) (new return buf, nil } -func (JSONCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan { +func (c *JSONCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan { switch target.(type) { case *string: return scanPlanAnyToString{} @@ -141,7 +150,9 @@ func (JSONCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan return &scanPlanSQLScanner{formatCode: format} } - return scanPlanJSONToJSONUnmarshal{} + return &scanPlanJSONToJSONUnmarshal{ + unmarshal: c.Unmarshal, + } } type scanPlanAnyToString struct{} @@ -173,9 +184,11 @@ func (scanPlanJSONToBytesScanner) Scan(src []byte, dst any) error { return scanner.ScanBytes(src) } -type scanPlanJSONToJSONUnmarshal struct{} +type scanPlanJSONToJSONUnmarshal struct { + unmarshal func(data []byte, v any) error +} -func (scanPlanJSONToJSONUnmarshal) Scan(src []byte, dst any) error { +func (s *scanPlanJSONToJSONUnmarshal) Scan(src []byte, dst any) error { if src == nil { dstValue := reflect.ValueOf(dst) if dstValue.Kind() == reflect.Ptr { @@ -193,10 +206,10 @@ func (scanPlanJSONToJSONUnmarshal) Scan(src []byte, dst any) error { elem := reflect.ValueOf(dst).Elem() elem.Set(reflect.Zero(elem.Type())) - return json.Unmarshal(src, dst) + return s.unmarshal(src, dst) } -func (c JSONCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) { +func (c *JSONCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) { if src == nil { return nil, nil } @@ -206,12 +219,12 @@ func (c JSONCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src return dstBuf, nil } -func (c JSONCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) { +func (c *JSONCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) { if src == nil { return nil, nil } var dst any - err := json.Unmarshal(src, &dst) + err := c.Unmarshal(src, &dst) return dst, err } diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/jsonb.go b/vendor/github.com/jackc/pgx/v5/pgtype/jsonb.go index 25555e7ffe4..4d4eb58e5b8 100644 --- a/vendor/github.com/jackc/pgx/v5/pgtype/jsonb.go +++ b/vendor/github.com/jackc/pgx/v5/pgtype/jsonb.go @@ -2,29 +2,31 @@ package pgtype import ( "database/sql/driver" - "encoding/json" "fmt" ) -type JSONBCodec struct{} +type JSONBCodec struct { + Marshal func(v any) ([]byte, error) + Unmarshal func(data []byte, v any) error +} -func (JSONBCodec) FormatSupported(format int16) bool { +func (*JSONBCodec) FormatSupported(format int16) bool { return 
format == TextFormatCode || format == BinaryFormatCode } -func (JSONBCodec) PreferredFormat() int16 { +func (*JSONBCodec) PreferredFormat() int16 { return TextFormatCode } -func (JSONBCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan { +func (c *JSONBCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan { switch format { case BinaryFormatCode: - plan := JSONCodec{}.PlanEncode(m, oid, TextFormatCode, value) + plan := (&JSONCodec{Marshal: c.Marshal, Unmarshal: c.Unmarshal}).PlanEncode(m, oid, TextFormatCode, value) if plan != nil { return &encodePlanJSONBCodecBinaryWrapper{textPlan: plan} } case TextFormatCode: - return JSONCodec{}.PlanEncode(m, oid, format, value) + return (&JSONCodec{Marshal: c.Marshal, Unmarshal: c.Unmarshal}).PlanEncode(m, oid, format, value) } return nil @@ -39,15 +41,15 @@ func (plan *encodePlanJSONBCodecBinaryWrapper) Encode(value any, buf []byte) (ne return plan.textPlan.Encode(value, buf) } -func (JSONBCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan { +func (c *JSONBCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan { switch format { case BinaryFormatCode: - plan := JSONCodec{}.PlanScan(m, oid, TextFormatCode, target) + plan := (&JSONCodec{Marshal: c.Marshal, Unmarshal: c.Unmarshal}).PlanScan(m, oid, TextFormatCode, target) if plan != nil { return &scanPlanJSONBCodecBinaryUnwrapper{textPlan: plan} } case TextFormatCode: - return JSONCodec{}.PlanScan(m, oid, format, target) + return (&JSONCodec{Marshal: c.Marshal, Unmarshal: c.Unmarshal}).PlanScan(m, oid, format, target) } return nil @@ -73,7 +75,7 @@ func (plan *scanPlanJSONBCodecBinaryUnwrapper) Scan(src []byte, dst any) error { return plan.textPlan.Scan(src[1:], dst) } -func (c JSONBCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) { +func (c *JSONBCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) { if src == nil { return nil, nil } @@ -100,7 +102,7 @@ func (c JSONBCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src } } -func (c JSONBCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) { +func (c *JSONBCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) { if src == nil { return nil, nil } @@ -122,6 +124,6 @@ func (c JSONBCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (a } var dst any - err := json.Unmarshal(src, &dst) + err := c.Unmarshal(src, &dst) return dst, err } diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/pgtype.go b/vendor/github.com/jackc/pgx/v5/pgtype/pgtype.go index 534ef6d16cd..4082956832d 100644 --- a/vendor/github.com/jackc/pgx/v5/pgtype/pgtype.go +++ b/vendor/github.com/jackc/pgx/v5/pgtype/pgtype.go @@ -41,6 +41,7 @@ const ( CircleOID = 718 CircleArrayOID = 719 UnknownOID = 705 + Macaddr8OID = 774 MacaddrOID = 829 InetOID = 869 BoolArrayOID = 1000 @@ -1330,7 +1331,7 @@ func (plan *derefPointerEncodePlan) Encode(value any, buf []byte) (newBuf []byte } // TryWrapDerefPointerEncodePlan tries to dereference a pointer. e.g. If value was of type *string then a wrapper plan -// would be returned that derefences the value. +// would be returned that dereferences the value. 
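
TryWrapDerefPointerEncodePlan and the typed-nil rules documented in doc.go above combine so that pointers are dereferenced for encoding and typed nils become SQL NULL without reaching a codec. A small sketch against the public Map.Encode API:

    package main

    import (
        "fmt"

        "github.com/jackc/pgx/v5/pgtype"
    )

    func main() {
        m := pgtype.NewMap()

        s := "hello"
        buf, err := m.Encode(pgtype.TextOID, pgtype.TextFormatCode, &s, nil)
        fmt.Println(string(buf), err) // hello <nil>: the *string was dereferenced for encoding

        var nilPtr *string
        buf, err = m.Encode(pgtype.TextOID, pgtype.TextFormatCode, nilPtr, nil)
        fmt.Println(buf, err) // [] <nil>: a typed nil encodes as SQL NULL (no bytes, no error)
    }
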
func TryWrapDerefPointerEncodePlan(value any) (plan WrappedEncodePlanNextSetter, nextValue any, ok bool) { if _, ok := value.(driver.Valuer); ok { return nil, nil, false @@ -1911,8 +1912,17 @@ func newEncodeError(value any, m *Map, oid uint32, formatCode int16, err error) // (nil, nil). The caller of Encode is responsible for writing the correct NULL value or the length of the data // written. func (m *Map) Encode(oid uint32, formatCode int16, value any, buf []byte) (newBuf []byte, err error) { - if value == nil { - return nil, nil + if isNil, callNilDriverValuer := isNilDriverValuer(value); isNil { + if callNilDriverValuer { + newBuf, err = (&encodePlanDriverValuer{m: m, oid: oid, formatCode: formatCode}).Encode(value, buf) + if err != nil { + return nil, newEncodeError(value, m, oid, formatCode, err) + } + + return newBuf, nil + } else { + return nil, nil + } } plan := m.PlanEncode(oid, formatCode, value) @@ -1967,3 +1977,55 @@ func (w *sqlScannerWrapper) Scan(src any) error { return w.m.Scan(t.OID, TextFormatCode, bufSrc, w.v) } + +// canBeNil returns true if value can be nil. +func canBeNil(value any) bool { + refVal := reflect.ValueOf(value) + kind := refVal.Kind() + switch kind { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.UnsafePointer, reflect.Interface, reflect.Slice: + return true + default: + return false + } +} + +// valuerReflectType is a reflect.Type for driver.Valuer. It has confusing syntax because reflect.TypeOf returns nil +// when it's argument is a nil interface value. So we use a pointer to the interface and call Elem to get the actual +// type. Yuck. +// +// This can be simplified in Go 1.22 with reflect.TypeFor. +// +// var valuerReflectType = reflect.TypeFor[driver.Valuer]() +var valuerReflectType = reflect.TypeOf((*driver.Valuer)(nil)).Elem() + +// isNilDriverValuer returns true if value is any type of nil unless it implements driver.Valuer. *T is not considered to implement +// driver.Valuer if it is only implemented by T. +func isNilDriverValuer(value any) (isNil bool, callNilDriverValuer bool) { + if value == nil { + return true, false + } + + refVal := reflect.ValueOf(value) + kind := refVal.Kind() + switch kind { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.UnsafePointer, reflect.Interface, reflect.Slice: + if !refVal.IsNil() { + return false, false + } + + if _, ok := value.(driver.Valuer); ok { + if kind == reflect.Ptr { + // The type assertion will succeed if driver.Valuer is implemented on T or *T. Check if it is implemented on *T + // by checking if it is not implemented on *T. 
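
The distinction isNilDriverValuer draws between driver.Valuer implemented on T versus on *T can be seen from the encoding side. A sketch under those rules; both types below are made up, and the exact NULL byte handling is as described in the doc.go addition above:

    package main

    import (
        "database/sql/driver"
        "fmt"

        "github.com/jackc/pgx/v5/pgtype"
    )

    // ptrValuer implements driver.Valuer on the pointer receiver and tolerates a nil
    // receiver, so pgx still calls Value on a typed nil (mirroring database/sql).
    type ptrValuer struct{ s string }

    func (v *ptrValuer) Value() (driver.Value, error) {
        if v == nil {
            return "fallback", nil
        }
        return v.s, nil
    }

    // valValuer implements driver.Valuer on the value receiver; a nil *valValuer is
    // encoded as SQL NULL without calling Value, since the call would dereference nil.
    type valValuer struct{ s string }

    func (v valValuer) Value() (driver.Value, error) { return v.s, nil }

    func main() {
        m := pgtype.NewMap()

        var p *ptrValuer
        buf, err := m.Encode(pgtype.TextOID, pgtype.TextFormatCode, p, nil)
        fmt.Println(string(buf), err) // fallback <nil>: Value ran on the nil pointer

        var q *valValuer
        buf, err = m.Encode(pgtype.TextOID, pgtype.TextFormatCode, q, nil)
        fmt.Println(buf, err) // [] <nil>: treated as NULL, Value never called
    }
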
+ return true, !refVal.Type().Elem().Implements(valuerReflectType) + } else { + return true, true + } + } + + return true, false + default: + return false, false + } +} diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/pgtype_default.go b/vendor/github.com/jackc/pgx/v5/pgtype/pgtype_default.go index c21ac081e29..9525f37c925 100644 --- a/vendor/github.com/jackc/pgx/v5/pgtype/pgtype_default.go +++ b/vendor/github.com/jackc/pgx/v5/pgtype/pgtype_default.go @@ -65,11 +65,12 @@ func initDefaultMap() { defaultMap.RegisterType(&Type{Name: "int4", OID: Int4OID, Codec: Int4Codec{}}) defaultMap.RegisterType(&Type{Name: "int8", OID: Int8OID, Codec: Int8Codec{}}) defaultMap.RegisterType(&Type{Name: "interval", OID: IntervalOID, Codec: IntervalCodec{}}) - defaultMap.RegisterType(&Type{Name: "json", OID: JSONOID, Codec: JSONCodec{}}) - defaultMap.RegisterType(&Type{Name: "jsonb", OID: JSONBOID, Codec: JSONBCodec{}}) + defaultMap.RegisterType(&Type{Name: "json", OID: JSONOID, Codec: &JSONCodec{Marshal: json.Marshal, Unmarshal: json.Unmarshal}}) + defaultMap.RegisterType(&Type{Name: "jsonb", OID: JSONBOID, Codec: &JSONBCodec{Marshal: json.Marshal, Unmarshal: json.Unmarshal}}) defaultMap.RegisterType(&Type{Name: "jsonpath", OID: JSONPathOID, Codec: &TextFormatOnlyCodec{TextCodec{}}}) defaultMap.RegisterType(&Type{Name: "line", OID: LineOID, Codec: LineCodec{}}) defaultMap.RegisterType(&Type{Name: "lseg", OID: LsegOID, Codec: LsegCodec{}}) + defaultMap.RegisterType(&Type{Name: "macaddr8", OID: Macaddr8OID, Codec: MacaddrCodec{}}) defaultMap.RegisterType(&Type{Name: "macaddr", OID: MacaddrOID, Codec: MacaddrCodec{}}) defaultMap.RegisterType(&Type{Name: "name", OID: NameOID, Codec: TextCodec{}}) defaultMap.RegisterType(&Type{Name: "numeric", OID: NumericOID, Codec: NumericCodec{}}) @@ -81,8 +82,8 @@ func initDefaultMap() { defaultMap.RegisterType(&Type{Name: "text", OID: TextOID, Codec: TextCodec{}}) defaultMap.RegisterType(&Type{Name: "tid", OID: TIDOID, Codec: TIDCodec{}}) defaultMap.RegisterType(&Type{Name: "time", OID: TimeOID, Codec: TimeCodec{}}) - defaultMap.RegisterType(&Type{Name: "timestamp", OID: TimestampOID, Codec: TimestampCodec{}}) - defaultMap.RegisterType(&Type{Name: "timestamptz", OID: TimestamptzOID, Codec: TimestamptzCodec{}}) + defaultMap.RegisterType(&Type{Name: "timestamp", OID: TimestampOID, Codec: &TimestampCodec{}}) + defaultMap.RegisterType(&Type{Name: "timestamptz", OID: TimestamptzOID, Codec: &TimestamptzCodec{}}) defaultMap.RegisterType(&Type{Name: "unknown", OID: UnknownOID, Codec: TextCodec{}}) defaultMap.RegisterType(&Type{Name: "uuid", OID: UUIDOID, Codec: UUIDCodec{}}) defaultMap.RegisterType(&Type{Name: "varbit", OID: VarbitOID, Codec: BitsCodec{}}) diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/time.go b/vendor/github.com/jackc/pgx/v5/pgtype/time.go index 2eb6ace28b5..61a3abdfdba 100644 --- a/vendor/github.com/jackc/pgx/v5/pgtype/time.go +++ b/vendor/github.com/jackc/pgx/v5/pgtype/time.go @@ -45,7 +45,12 @@ func (t *Time) Scan(src any) error { switch src := src.(type) { case string: - return scanPlanTextAnyToTimeScanner{}.Scan([]byte(src), t) + err := scanPlanTextAnyToTimeScanner{}.Scan([]byte(src), t) + if err != nil { + t.Microseconds = 0 + t.Valid = false + } + return err } return fmt.Errorf("cannot scan %T", src) @@ -136,6 +141,8 @@ func (TimeCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan switch target.(type) { case TimeScanner: return scanPlanBinaryTimeToTimeScanner{} + case TextScanner: + return scanPlanBinaryTimeToTextScanner{} } case 
TextFormatCode: switch target.(type) { @@ -165,6 +172,34 @@ func (scanPlanBinaryTimeToTimeScanner) Scan(src []byte, dst any) error { return scanner.ScanTime(Time{Microseconds: usec, Valid: true}) } +type scanPlanBinaryTimeToTextScanner struct{} + +func (scanPlanBinaryTimeToTextScanner) Scan(src []byte, dst any) error { + ts, ok := (dst).(TextScanner) + if !ok { + return ErrScanTargetTypeChanged + } + + if src == nil { + return ts.ScanText(Text{}) + } + + if len(src) != 8 { + return fmt.Errorf("invalid length for time: %v", len(src)) + } + + usec := int64(binary.BigEndian.Uint64(src)) + + tim := Time{Microseconds: usec, Valid: true} + + buf, err := TimeCodec{}.PlanEncode(nil, 0, TextFormatCode, tim).Encode(tim, nil) + if err != nil { + return err + } + + return ts.ScanText(Text{String: string(buf), Valid: true}) +} + type scanPlanTextAnyToTimeScanner struct{} func (scanPlanTextAnyToTimeScanner) Scan(src []byte, dst any) error { @@ -176,7 +211,7 @@ func (scanPlanTextAnyToTimeScanner) Scan(src []byte, dst any) error { s := string(src) - if len(s) < 8 { + if len(s) < 8 || s[2] != ':' || s[5] != ':' { return fmt.Errorf("cannot decode %v into Time", s) } @@ -199,6 +234,10 @@ func (scanPlanTextAnyToTimeScanner) Scan(src []byte, dst any) error { usec += seconds * microsecondsPerSecond if len(s) > 9 { + if s[8] != '.' || len(s) > 15 { + return fmt.Errorf("cannot decode %v into Time", s) + } + fraction := s[9:] n, err := strconv.ParseInt(fraction, 10, 64) if err != nil { diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/timestamp.go b/vendor/github.com/jackc/pgx/v5/pgtype/timestamp.go index 35d7395660e..677a2c6ea96 100644 --- a/vendor/github.com/jackc/pgx/v5/pgtype/timestamp.go +++ b/vendor/github.com/jackc/pgx/v5/pgtype/timestamp.go @@ -46,7 +46,7 @@ func (ts *Timestamp) Scan(src any) error { switch src := src.(type) { case string: - return scanPlanTextTimestampToTimestampScanner{}.Scan([]byte(src), ts) + return (&scanPlanTextTimestampToTimestampScanner{}).Scan([]byte(src), ts) case time.Time: *ts = Timestamp{Time: src, Valid: true} return nil @@ -116,17 +116,21 @@ func (ts *Timestamp) UnmarshalJSON(b []byte) error { return nil } -type TimestampCodec struct{} +type TimestampCodec struct { + // ScanLocation is the location that the time is assumed to be in for scanning. This is different from + // TimestamptzCodec.ScanLocation in that this setting does change the instant in time that the timestamp represents. 
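
The new ScanLocation field lets scanned timestamps come back in a chosen *time.Location; the analogous TimestamptzCodec.ScanLocation appears further down in this patch. A registration sketch against a connection's type map; the package and function names are made up. Note the semantic difference spelled out in the field comments: for timestamp the instant in time changes, for timestamptz only the presentation zone does.

    package pgxutil // illustrative package name

    import (
        "time"

        "github.com/jackc/pgx/v5"
        "github.com/jackc/pgx/v5/pgtype"
    )

    // RegisterScanLocation makes timestamp and timestamptz values scanned through
    // conn come back in loc instead of the defaults.
    func RegisterScanLocation(conn *pgx.Conn, loc *time.Location) {
        m := conn.TypeMap()
        m.RegisterType(&pgtype.Type{Name: "timestamp", OID: pgtype.TimestampOID,
            Codec: &pgtype.TimestampCodec{ScanLocation: loc}})
        m.RegisterType(&pgtype.Type{Name: "timestamptz", OID: pgtype.TimestamptzOID,
            Codec: &pgtype.TimestamptzCodec{ScanLocation: loc}})
    }
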
+ ScanLocation *time.Location +} -func (TimestampCodec) FormatSupported(format int16) bool { +func (*TimestampCodec) FormatSupported(format int16) bool { return format == TextFormatCode || format == BinaryFormatCode } -func (TimestampCodec) PreferredFormat() int16 { +func (*TimestampCodec) PreferredFormat() int16 { return BinaryFormatCode } -func (TimestampCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan { +func (*TimestampCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan { if _, ok := value.(TimestampValuer); !ok { return nil } @@ -220,27 +224,27 @@ func discardTimeZone(t time.Time) time.Time { return t } -func (TimestampCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan { +func (c *TimestampCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan { switch format { case BinaryFormatCode: switch target.(type) { case TimestampScanner: - return scanPlanBinaryTimestampToTimestampScanner{} + return &scanPlanBinaryTimestampToTimestampScanner{location: c.ScanLocation} } case TextFormatCode: switch target.(type) { case TimestampScanner: - return scanPlanTextTimestampToTimestampScanner{} + return &scanPlanTextTimestampToTimestampScanner{location: c.ScanLocation} } } return nil } -type scanPlanBinaryTimestampToTimestampScanner struct{} +type scanPlanBinaryTimestampToTimestampScanner struct{ location *time.Location } -func (scanPlanBinaryTimestampToTimestampScanner) Scan(src []byte, dst any) error { +func (plan *scanPlanBinaryTimestampToTimestampScanner) Scan(src []byte, dst any) error { scanner := (dst).(TimestampScanner) if src == nil { @@ -264,15 +268,18 @@ func (scanPlanBinaryTimestampToTimestampScanner) Scan(src []byte, dst any) error microsecFromUnixEpochToY2K/1000000+microsecSinceY2K/1000000, (microsecFromUnixEpochToY2K%1000000*1000)+(microsecSinceY2K%1000000*1000), ).UTC() + if plan.location != nil { + tim = time.Date(tim.Year(), tim.Month(), tim.Day(), tim.Hour(), tim.Minute(), tim.Second(), tim.Nanosecond(), plan.location) + } ts = Timestamp{Time: tim, Valid: true} } return scanner.ScanTimestamp(ts) } -type scanPlanTextTimestampToTimestampScanner struct{} +type scanPlanTextTimestampToTimestampScanner struct{ location *time.Location } -func (scanPlanTextTimestampToTimestampScanner) Scan(src []byte, dst any) error { +func (plan *scanPlanTextTimestampToTimestampScanner) Scan(src []byte, dst any) error { scanner := (dst).(TimestampScanner) if src == nil { @@ -302,13 +309,17 @@ func (scanPlanTextTimestampToTimestampScanner) Scan(src []byte, dst any) error { tim = time.Date(year, tim.Month(), tim.Day(), tim.Hour(), tim.Minute(), tim.Second(), tim.Nanosecond(), tim.Location()) } + if plan.location != nil { + tim = time.Date(tim.Year(), tim.Month(), tim.Day(), tim.Hour(), tim.Minute(), tim.Second(), tim.Nanosecond(), plan.location) + } + ts = Timestamp{Time: tim, Valid: true} } return scanner.ScanTimestamp(ts) } -func (c TimestampCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) { +func (c *TimestampCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) { if src == nil { return nil, nil } @@ -326,7 +337,7 @@ func (c TimestampCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, return ts.Time, nil } -func (c TimestampCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) { +func (c *TimestampCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) { if src == nil { return 
nil, nil } diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/timestamptz.go b/vendor/github.com/jackc/pgx/v5/pgtype/timestamptz.go index f568fe3020a..7efbcffd2a1 100644 --- a/vendor/github.com/jackc/pgx/v5/pgtype/timestamptz.go +++ b/vendor/github.com/jackc/pgx/v5/pgtype/timestamptz.go @@ -54,7 +54,7 @@ func (tstz *Timestamptz) Scan(src any) error { switch src := src.(type) { case string: - return scanPlanTextTimestamptzToTimestamptzScanner{}.Scan([]byte(src), tstz) + return (&scanPlanTextTimestamptzToTimestamptzScanner{}).Scan([]byte(src), tstz) case time.Time: *tstz = Timestamptz{Time: src, Valid: true} return nil @@ -124,17 +124,21 @@ func (tstz *Timestamptz) UnmarshalJSON(b []byte) error { return nil } -type TimestamptzCodec struct{} +type TimestamptzCodec struct { + // ScanLocation is the location to return scanned timestamptz values in. This does not change the instant in time that + // the timestamptz represents. + ScanLocation *time.Location +} -func (TimestamptzCodec) FormatSupported(format int16) bool { +func (*TimestamptzCodec) FormatSupported(format int16) bool { return format == TextFormatCode || format == BinaryFormatCode } -func (TimestamptzCodec) PreferredFormat() int16 { +func (*TimestamptzCodec) PreferredFormat() int16 { return BinaryFormatCode } -func (TimestamptzCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan { +func (*TimestamptzCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan { if _, ok := value.(TimestamptzValuer); !ok { return nil } @@ -220,27 +224,27 @@ func (encodePlanTimestamptzCodecText) Encode(value any, buf []byte) (newBuf []by return buf, nil } -func (TimestamptzCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan { +func (c *TimestamptzCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan { switch format { case BinaryFormatCode: switch target.(type) { case TimestamptzScanner: - return scanPlanBinaryTimestamptzToTimestamptzScanner{} + return &scanPlanBinaryTimestamptzToTimestamptzScanner{location: c.ScanLocation} } case TextFormatCode: switch target.(type) { case TimestamptzScanner: - return scanPlanTextTimestamptzToTimestamptzScanner{} + return &scanPlanTextTimestamptzToTimestamptzScanner{location: c.ScanLocation} } } return nil } -type scanPlanBinaryTimestamptzToTimestamptzScanner struct{} +type scanPlanBinaryTimestamptzToTimestamptzScanner struct{ location *time.Location } -func (scanPlanBinaryTimestamptzToTimestamptzScanner) Scan(src []byte, dst any) error { +func (plan *scanPlanBinaryTimestamptzToTimestamptzScanner) Scan(src []byte, dst any) error { scanner := (dst).(TimestamptzScanner) if src == nil { @@ -264,15 +268,18 @@ func (scanPlanBinaryTimestamptzToTimestamptzScanner) Scan(src []byte, dst any) e microsecFromUnixEpochToY2K/1000000+microsecSinceY2K/1000000, (microsecFromUnixEpochToY2K%1000000*1000)+(microsecSinceY2K%1000000*1000), ) + if plan.location != nil { + tim = tim.In(plan.location) + } tstz = Timestamptz{Time: tim, Valid: true} } return scanner.ScanTimestamptz(tstz) } -type scanPlanTextTimestamptzToTimestamptzScanner struct{} +type scanPlanTextTimestamptzToTimestamptzScanner struct{ location *time.Location } -func (scanPlanTextTimestamptzToTimestamptzScanner) Scan(src []byte, dst any) error { +func (plan *scanPlanTextTimestamptzToTimestamptzScanner) Scan(src []byte, dst any) error { scanner := (dst).(TimestamptzScanner) if src == nil { @@ -312,13 +319,17 @@ func (scanPlanTextTimestamptzToTimestamptzScanner) Scan(src []byte, dst any) err tim = 
time.Date(year, tim.Month(), tim.Day(), tim.Hour(), tim.Minute(), tim.Second(), tim.Nanosecond(), tim.Location()) } + if plan.location != nil { + tim = tim.In(plan.location) + } + tstz = Timestamptz{Time: tim, Valid: true} } return scanner.ScanTimestamptz(tstz) } -func (c TimestamptzCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) { +func (c *TimestamptzCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) { if src == nil { return nil, nil } @@ -336,7 +347,7 @@ func (c TimestamptzCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int1 return tstz.Time, nil } -func (c TimestamptzCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) { +func (c *TimestamptzCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) { if src == nil { return nil, nil } diff --git a/vendor/github.com/jackc/pgx/v5/pgxpool/conn.go b/vendor/github.com/jackc/pgx/v5/pgxpool/conn.go index 36f90969ec1..38c90f3da19 100644 --- a/vendor/github.com/jackc/pgx/v5/pgxpool/conn.go +++ b/vendor/github.com/jackc/pgx/v5/pgxpool/conn.go @@ -26,6 +26,10 @@ func (c *Conn) Release() { res := c.res c.res = nil + if c.p.releaseTracer != nil { + c.p.releaseTracer.TraceRelease(c.p, TraceReleaseData{Conn: conn}) + } + if conn.IsClosed() || conn.PgConn().IsBusy() || conn.PgConn().TxStatus() != 'I' { res.Destroy() // Signal to the health check to run since we just destroyed a connections diff --git a/vendor/github.com/jackc/pgx/v5/pgxpool/doc.go b/vendor/github.com/jackc/pgx/v5/pgxpool/doc.go index 06cc63d5f50..099443bca83 100644 --- a/vendor/github.com/jackc/pgx/v5/pgxpool/doc.go +++ b/vendor/github.com/jackc/pgx/v5/pgxpool/doc.go @@ -8,7 +8,7 @@ The primary way of creating a pool is with [pgxpool.New]: pool, err := pgxpool.New(context.Background(), os.Getenv("DATABASE_URL")) -The database connection string can be in URL or DSN format. PostgreSQL settings, pgx settings, and pool settings can be +The database connection string can be in URL or keyword/value format. PostgreSQL settings, pgx settings, and pool settings can be specified here. In addition, a config struct can be created by [ParseConfig]. config, err := pgxpool.ParseConfig(os.Getenv("DATABASE_URL")) diff --git a/vendor/github.com/jackc/pgx/v5/pgxpool/pool.go b/vendor/github.com/jackc/pgx/v5/pgxpool/pool.go index 9f74805e192..fdcba7241ac 100644 --- a/vendor/github.com/jackc/pgx/v5/pgxpool/pool.go +++ b/vendor/github.com/jackc/pgx/v5/pgxpool/pool.go @@ -95,6 +95,9 @@ type Pool struct { healthCheckChan chan struct{} + acquireTracer AcquireTracer + releaseTracer ReleaseTracer + closeOnce sync.Once closeChan chan struct{} } @@ -195,6 +198,14 @@ func NewWithConfig(ctx context.Context, config *Config) (*Pool, error) { closeChan: make(chan struct{}), } + if t, ok := config.ConnConfig.Tracer.(AcquireTracer); ok { + p.acquireTracer = t + } + + if t, ok := config.ConnConfig.Tracer.(ReleaseTracer); ok { + p.releaseTracer = t + } + var err error p.p, err = puddle.NewPool( &puddle.Config[*connResource]{ @@ -279,7 +290,7 @@ func NewWithConfig(ctx context.Context, config *Config) (*Pool, error) { // // See Config for definitions of these arguments. 
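
NewWithConfig now probes config.ConnConfig.Tracer for the optional AcquireTracer and ReleaseTracer interfaces defined in the new pgxpool/tracer.go below. A sketch of a tracer satisfying both; the no-op query-tracing methods are needed because ConnConfig.Tracer is a pgx.QueryTracer, and everything other than the pgx/pgxpool identifiers is made up:

    package main

    import (
        "context"
        "log"
        "os"
        "time"

        "github.com/jackc/pgx/v5"
        "github.com/jackc/pgx/v5/pgxpool"
    )

    type poolTracer struct{}

    type acquireStartKey struct{}

    func (poolTracer) TraceAcquireStart(ctx context.Context, _ *pgxpool.Pool, _ pgxpool.TraceAcquireStartData) context.Context {
        return context.WithValue(ctx, acquireStartKey{}, time.Now())
    }

    func (poolTracer) TraceAcquireEnd(ctx context.Context, _ *pgxpool.Pool, data pgxpool.TraceAcquireEndData) {
        if start, ok := ctx.Value(acquireStartKey{}).(time.Time); ok {
            log.Printf("acquire took %v (err=%v)", time.Since(start), data.Err)
        }
    }

    func (poolTracer) TraceRelease(_ *pgxpool.Pool, _ pgxpool.TraceReleaseData) {
        log.Print("connection released")
    }

    func (poolTracer) TraceQueryStart(ctx context.Context, _ *pgx.Conn, _ pgx.TraceQueryStartData) context.Context {
        return ctx
    }

    func (poolTracer) TraceQueryEnd(context.Context, *pgx.Conn, pgx.TraceQueryEndData) {}

    func main() {
        cfg, err := pgxpool.ParseConfig(os.Getenv("DATABASE_URL"))
        if err != nil {
            log.Fatal(err)
        }
        cfg.ConnConfig.Tracer = poolTracer{} // NewWithConfig detects the optional interfaces from here

        pool, err := pgxpool.NewWithConfig(context.Background(), cfg)
        if err != nil {
            log.Fatal(err)
        }
        defer pool.Close()
    }
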
// -// # Example DSN +// # Example Keyword/Value // user=jack password=secret host=pg.example.com port=5432 dbname=mydb sslmode=verify-ca pool_max_conns=10 // // # Example URL @@ -498,7 +509,18 @@ func (p *Pool) createIdleResources(parentCtx context.Context, targetResources in } // Acquire returns a connection (*Conn) from the Pool -func (p *Pool) Acquire(ctx context.Context) (*Conn, error) { +func (p *Pool) Acquire(ctx context.Context) (c *Conn, err error) { + if p.acquireTracer != nil { + ctx = p.acquireTracer.TraceAcquireStart(ctx, p, TraceAcquireStartData{}) + defer func() { + var conn *pgx.Conn + if c != nil { + conn = c.Conn() + } + p.acquireTracer.TraceAcquireEnd(ctx, p, TraceAcquireEndData{Conn: conn, Err: err}) + }() + } + for { res, err := p.p.Acquire(ctx) if err != nil { diff --git a/vendor/github.com/jackc/pgx/v5/pgxpool/tracer.go b/vendor/github.com/jackc/pgx/v5/pgxpool/tracer.go new file mode 100644 index 00000000000..78b9d15a2d5 --- /dev/null +++ b/vendor/github.com/jackc/pgx/v5/pgxpool/tracer.go @@ -0,0 +1,33 @@ +package pgxpool + +import ( + "context" + + "github.com/jackc/pgx/v5" +) + +// AcquireTracer traces Acquire. +type AcquireTracer interface { + // TraceAcquireStart is called at the beginning of Acquire. + // The returned context is used for the rest of the call and will be passed to the TraceAcquireEnd. + TraceAcquireStart(ctx context.Context, pool *Pool, data TraceAcquireStartData) context.Context + // TraceAcquireEnd is called when a connection has been acquired. + TraceAcquireEnd(ctx context.Context, pool *Pool, data TraceAcquireEndData) +} + +type TraceAcquireStartData struct{} + +type TraceAcquireEndData struct { + Conn *pgx.Conn + Err error +} + +// ReleaseTracer traces Release. +type ReleaseTracer interface { + // TraceRelease is called at the beginning of Release. + TraceRelease(pool *Pool, data TraceReleaseData) +} + +type TraceReleaseData struct { + Conn *pgx.Conn +} diff --git a/vendor/github.com/jackc/pgx/v5/rows.go b/vendor/github.com/jackc/pgx/v5/rows.go index 78ef5326ac3..d4f7a9016c5 100644 --- a/vendor/github.com/jackc/pgx/v5/rows.go +++ b/vendor/github.com/jackc/pgx/v5/rows.go @@ -6,6 +6,7 @@ import ( "fmt" "reflect" "strings" + "sync" "time" "github.com/jackc/pgx/v5/pgconn" @@ -418,6 +419,8 @@ type CollectableRow interface { type RowToFunc[T any] func(row CollectableRow) (T, error) // AppendRows iterates through rows, calling fn for each row, and appending the results into a slice of T. +// +// This function closes the rows automatically on return. func AppendRows[T any, S ~[]T](slice S, rows Rows, fn RowToFunc[T]) (S, error) { defer rows.Close() @@ -437,12 +440,16 @@ func AppendRows[T any, S ~[]T](slice S, rows Rows, fn RowToFunc[T]) (S, error) { } // CollectRows iterates through rows, calling fn for each row, and collecting the results into a slice of T. +// +// This function closes the rows automatically on return. func CollectRows[T any](rows Rows, fn RowToFunc[T]) ([]T, error) { return AppendRows([]T{}, rows, fn) } // CollectOneRow calls fn for the first row in rows and returns the result. If no rows are found returns an error where errors.Is(ErrNoRows) is true. // CollectOneRow is to CollectRows as QueryRow is to Query. +// +// This function closes the rows automatically on return. func CollectOneRow[T any](rows Rows, fn RowToFunc[T]) (T, error) { defer rows.Close() @@ -468,6 +475,8 @@ func CollectOneRow[T any](rows Rows, fn RowToFunc[T]) (T, error) { // CollectExactlyOneRow calls fn for the first row in rows and returns the result. 
// - If no rows are found returns an error where errors.Is(ErrNoRows) is true. // - If more than 1 row is found returns an error where errors.Is(ErrTooManyRows) is true. +// +// This function closes the rows automatically on return. func CollectExactlyOneRow[T any](rows Rows, fn RowToFunc[T]) (T, error) { defer rows.Close() @@ -541,7 +550,7 @@ func (rs *mapRowScanner) ScanRow(rows Rows) error { // ignored. func RowToStructByPos[T any](row CollectableRow) (T, error) { var value T - err := row.Scan(&positionalStructRowScanner{ptrToStruct: &value}) + err := (&positionalStructRowScanner{ptrToStruct: &value}).ScanRow(row) return value, err } @@ -550,7 +559,7 @@ func RowToStructByPos[T any](row CollectableRow) (T, error) { // the field will be ignored. func RowToAddrOfStructByPos[T any](row CollectableRow) (*T, error) { var value T - err := row.Scan(&positionalStructRowScanner{ptrToStruct: &value}) + err := (&positionalStructRowScanner{ptrToStruct: &value}).ScanRow(row) return &value, err } @@ -558,46 +567,60 @@ type positionalStructRowScanner struct { ptrToStruct any } -func (rs *positionalStructRowScanner) ScanRow(rows Rows) error { - dst := rs.ptrToStruct - dstValue := reflect.ValueOf(dst) - if dstValue.Kind() != reflect.Ptr { - return fmt.Errorf("dst not a pointer") +func (rs *positionalStructRowScanner) ScanRow(rows CollectableRow) error { + typ := reflect.TypeOf(rs.ptrToStruct).Elem() + fields := lookupStructFields(typ) + if len(rows.RawValues()) > len(fields) { + return fmt.Errorf( + "got %d values, but dst struct has only %d fields", + len(rows.RawValues()), + len(fields), + ) } - - dstElemValue := dstValue.Elem() - scanTargets := rs.appendScanTargets(dstElemValue, nil) - - if len(rows.RawValues()) > len(scanTargets) { - return fmt.Errorf("got %d values, but dst struct has only %d fields", len(rows.RawValues()), len(scanTargets)) - } - + scanTargets := setupStructScanTargets(rs.ptrToStruct, fields) return rows.Scan(scanTargets...) } -func (rs *positionalStructRowScanner) appendScanTargets(dstElemValue reflect.Value, scanTargets []any) []any { - dstElemType := dstElemValue.Type() +// Map from reflect.Type -> []structRowField +var positionalStructFieldMap sync.Map - if scanTargets == nil { - scanTargets = make([]any, 0, dstElemType.NumField()) +func lookupStructFields(t reflect.Type) []structRowField { + if cached, ok := positionalStructFieldMap.Load(t); ok { + return cached.([]structRowField) } - for i := 0; i < dstElemType.NumField(); i++ { - sf := dstElemType.Field(i) + fieldStack := make([]int, 0, 1) + fields := computeStructFields(t, make([]structRowField, 0, t.NumField()), &fieldStack) + fieldsIface, _ := positionalStructFieldMap.LoadOrStore(t, fields) + return fieldsIface.([]structRowField) +} + +func computeStructFields( + t reflect.Type, + fields []structRowField, + fieldStack *[]int, +) []structRowField { + tail := len(*fieldStack) + *fieldStack = append(*fieldStack, 0) + for i := 0; i < t.NumField(); i++ { + sf := t.Field(i) + (*fieldStack)[tail] = i // Handle anonymous struct embedding, but do not try to handle embedded pointers. if sf.Anonymous && sf.Type.Kind() == reflect.Struct { - scanTargets = rs.appendScanTargets(dstElemValue.Field(i), scanTargets) + fields = computeStructFields(sf.Type, fields, fieldStack) } else if sf.PkgPath == "" { dbTag, _ := sf.Tag.Lookup(structTagKey) if dbTag == "-" { // Field is ignored, skip it. 
continue } - scanTargets = append(scanTargets, dstElemValue.Field(i).Addr().Interface()) + fields = append(fields, structRowField{ + path: append([]int(nil), *fieldStack...), + }) } } - - return scanTargets + *fieldStack = (*fieldStack)[:tail] + return fields } // RowToStructByName returns a T scanned from row. T must be a struct. T must have the same number of named public @@ -605,7 +628,7 @@ func (rs *positionalStructRowScanner) appendScanTargets(dstElemValue reflect.Val // column name can be overridden with a "db" struct tag. If the "db" struct tag is "-" then the field will be ignored. func RowToStructByName[T any](row CollectableRow) (T, error) { var value T - err := row.Scan(&namedStructRowScanner{ptrToStruct: &value}) + err := (&namedStructRowScanner{ptrToStruct: &value}).ScanRow(row) return value, err } @@ -615,7 +638,7 @@ func RowToStructByName[T any](row CollectableRow) (T, error) { // then the field will be ignored. func RowToAddrOfStructByName[T any](row CollectableRow) (*T, error) { var value T - err := row.Scan(&namedStructRowScanner{ptrToStruct: &value}) + err := (&namedStructRowScanner{ptrToStruct: &value}).ScanRow(row) return &value, err } @@ -624,7 +647,7 @@ func RowToAddrOfStructByName[T any](row CollectableRow) (*T, error) { // column name can be overridden with a "db" struct tag. If the "db" struct tag is "-" then the field will be ignored. func RowToStructByNameLax[T any](row CollectableRow) (T, error) { var value T - err := row.Scan(&namedStructRowScanner{ptrToStruct: &value, lax: true}) + err := (&namedStructRowScanner{ptrToStruct: &value, lax: true}).ScanRow(row) return value, err } @@ -634,7 +657,7 @@ func RowToStructByNameLax[T any](row CollectableRow) (T, error) { // then the field will be ignored. func RowToAddrOfStructByNameLax[T any](row CollectableRow) (*T, error) { var value T - err := row.Scan(&namedStructRowScanner{ptrToStruct: &value, lax: true}) + err := (&namedStructRowScanner{ptrToStruct: &value, lax: true}).ScanRow(row) return &value, err } @@ -643,64 +666,123 @@ type namedStructRowScanner struct { lax bool } -func (rs *namedStructRowScanner) ScanRow(rows Rows) error { - dst := rs.ptrToStruct - dstValue := reflect.ValueOf(dst) - if dstValue.Kind() != reflect.Ptr { - return fmt.Errorf("dst not a pointer") - } - - dstElemValue := dstValue.Elem() - scanTargets, err := rs.appendScanTargets(dstElemValue, nil, rows.FieldDescriptions()) +func (rs *namedStructRowScanner) ScanRow(rows CollectableRow) error { + typ := reflect.TypeOf(rs.ptrToStruct).Elem() + fldDescs := rows.FieldDescriptions() + namedStructFields, err := lookupNamedStructFields(typ, fldDescs) if err != nil { return err } - - for i, t := range scanTargets { - if t == nil { - return fmt.Errorf("struct doesn't have corresponding row field %s", rows.FieldDescriptions()[i].Name) - } + if !rs.lax && namedStructFields.missingField != "" { + return fmt.Errorf("cannot find field %s in returned row", namedStructFields.missingField) } - + fields := namedStructFields.fields + scanTargets := setupStructScanTargets(rs.ptrToStruct, fields) return rows.Scan(scanTargets...) } -const structTagKey = "db" - -func fieldPosByName(fldDescs []pgconn.FieldDescription, field string) (i int) { - i = -1 - for i, desc := range fldDescs { +// Map from namedStructFieldMap -> *namedStructFields +var namedStructFieldMap sync.Map - // Snake case support. 
- field = strings.ReplaceAll(field, "_", "") - descName := strings.ReplaceAll(desc.Name, "_", "") +type namedStructFieldsKey struct { + t reflect.Type + colNames string +} - if strings.EqualFold(descName, field) { - return i - } - } - return +type namedStructFields struct { + fields []structRowField + // missingField is the first field from the struct without a corresponding row field. + // This is used to construct the correct error message for non-lax queries. + missingField string } -func (rs *namedStructRowScanner) appendScanTargets(dstElemValue reflect.Value, scanTargets []any, fldDescs []pgconn.FieldDescription) ([]any, error) { - var err error - dstElemType := dstElemValue.Type() +func lookupNamedStructFields( + t reflect.Type, + fldDescs []pgconn.FieldDescription, +) (*namedStructFields, error) { + key := namedStructFieldsKey{ + t: t, + colNames: joinFieldNames(fldDescs), + } + if cached, ok := namedStructFieldMap.Load(key); ok { + return cached.(*namedStructFields), nil + } - if scanTargets == nil { - scanTargets = make([]any, len(fldDescs)) + // We could probably do two-levels of caching, where we compute the key -> fields mapping + // for a type only once, cache it by type, then use that to compute the column -> fields + // mapping for a given set of columns. + fieldStack := make([]int, 0, 1) + fields, missingField := computeNamedStructFields( + fldDescs, + t, + make([]structRowField, len(fldDescs)), + &fieldStack, + ) + for i, f := range fields { + if f.path == nil { + return nil, fmt.Errorf( + "struct doesn't have corresponding row field %s", + fldDescs[i].Name, + ) + } } - for i := 0; i < dstElemType.NumField(); i++ { - sf := dstElemType.Field(i) + fieldsIface, _ := namedStructFieldMap.LoadOrStore( + key, + &namedStructFields{fields: fields, missingField: missingField}, + ) + return fieldsIface.(*namedStructFields), nil +} + +func joinFieldNames(fldDescs []pgconn.FieldDescription) string { + switch len(fldDescs) { + case 0: + return "" + case 1: + return fldDescs[0].Name + } + + totalSize := len(fldDescs) - 1 // Space for separator bytes. + for _, d := range fldDescs { + totalSize += len(d.Name) + } + var b strings.Builder + b.Grow(totalSize) + b.WriteString(fldDescs[0].Name) + for _, d := range fldDescs[1:] { + b.WriteByte(0) // Join with NUL byte as it's (presumably) not a valid column character. + b.WriteString(d.Name) + } + return b.String() +} + +func computeNamedStructFields( + fldDescs []pgconn.FieldDescription, + t reflect.Type, + fields []structRowField, + fieldStack *[]int, +) ([]structRowField, string) { + var missingField string + tail := len(*fieldStack) + *fieldStack = append(*fieldStack, 0) + for i := 0; i < t.NumField(); i++ { + sf := t.Field(i) + (*fieldStack)[tail] = i if sf.PkgPath != "" && !sf.Anonymous { // Field is unexported, skip it. continue } // Handle anonymous struct embedding, but do not try to handle embedded pointers. 
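
The rewritten struct row scanners cache the column-to-field mapping per type and column list, but the public entry points are unchanged. A usage sketch; the table, columns, and struct are illustrative:

    package example

    import (
        "context"

        "github.com/jackc/pgx/v5"
    )

    // user maps columns by name: the "db" tag wins, otherwise the field name is matched
    // case-insensitively with underscores ignored (so Email matches the email column).
    type user struct {
        ID    int64  `db:"id"`
        Name  string `db:"name"`
        Email string
    }

    // loadUsers drives the cached field lookup through the unchanged public helpers.
    func loadUsers(ctx context.Context, conn *pgx.Conn) ([]user, error) {
        rows, err := conn.Query(ctx, `select id, name, email from users`)
        if err != nil {
            return nil, err
        }
        // CollectRows closes rows on return; RowToStructByName maps columns to struct fields.
        return pgx.CollectRows(rows, pgx.RowToStructByName[user])
    }
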
if sf.Anonymous && sf.Type.Kind() == reflect.Struct { - scanTargets, err = rs.appendScanTargets(dstElemValue.Field(i), scanTargets, fldDescs) - if err != nil { - return nil, err + var missingSubField string + fields, missingSubField = computeNamedStructFields( + fldDescs, + sf.Type, + fields, + fieldStack, + ) + if missingField == "" { + missingField = missingSubField } } else { dbTag, dbTagPresent := sf.Tag.Lookup(structTagKey) @@ -717,17 +799,53 @@ func (rs *namedStructRowScanner) appendScanTargets(dstElemValue reflect.Value, s } fpos := fieldPosByName(fldDescs, colName) if fpos == -1 { - if rs.lax { - continue + if missingField == "" { + missingField = colName } - return nil, fmt.Errorf("cannot find field %s in returned row", colName) + continue } - if fpos >= len(scanTargets) && !rs.lax { - return nil, fmt.Errorf("cannot find field %s in returned row", colName) + fields[fpos] = structRowField{ + path: append([]int(nil), *fieldStack...), } - scanTargets[fpos] = dstElemValue.Field(i).Addr().Interface() } } + *fieldStack = (*fieldStack)[:tail] + + return fields, missingField +} + +const structTagKey = "db" + +func fieldPosByName(fldDescs []pgconn.FieldDescription, field string) (i int) { + i = -1 + for i, desc := range fldDescs { - return scanTargets, err + // Snake case support. + field = strings.ReplaceAll(field, "_", "") + descName := strings.ReplaceAll(desc.Name, "_", "") + + if strings.EqualFold(descName, field) { + return i + } + } + return +} + +// structRowField describes a field of a struct. +// +// TODO: It would be a bit more efficient to track the path using the pointer +// offset within the (outermost) struct and use unsafe.Pointer arithmetic to +// construct references when scanning rows. However, it's not clear it's worth +// using unsafe for this. +type structRowField struct { + path []int +} + +func setupStructScanTargets(receiver any, fields []structRowField) []any { + scanTargets := make([]any, len(fields)) + v := reflect.ValueOf(receiver).Elem() + for i, f := range fields { + scanTargets[i] = v.FieldByIndex(f.path).Addr().Interface() + } + return scanTargets } diff --git a/vendor/github.com/jackc/pgx/v5/stdlib/sql.go b/vendor/github.com/jackc/pgx/v5/stdlib/sql.go index 3d65e23ad9c..29cd3fbbffd 100644 --- a/vendor/github.com/jackc/pgx/v5/stdlib/sql.go +++ b/vendor/github.com/jackc/pgx/v5/stdlib/sql.go @@ -7,7 +7,7 @@ // return err // } // -// Or from a DSN string. +// Or from a keyword/value string. 
// // db, err := sql.Open("pgx", "user=postgres password=secret host=localhost port=5432 database=pgx_test sslmode=disable") // if err != nil { diff --git a/vendor/github.com/jackc/pgx/v5/values.go b/vendor/github.com/jackc/pgx/v5/values.go index cab717d0ab2..6e2ff30030e 100644 --- a/vendor/github.com/jackc/pgx/v5/values.go +++ b/vendor/github.com/jackc/pgx/v5/values.go @@ -3,7 +3,6 @@ package pgx import ( "errors" - "github.com/jackc/pgx/v5/internal/anynil" "github.com/jackc/pgx/v5/internal/pgio" "github.com/jackc/pgx/v5/pgtype" ) @@ -15,10 +14,6 @@ const ( ) func convertSimpleArgument(m *pgtype.Map, arg any) (any, error) { - if anynil.Is(arg) { - return nil, nil - } - buf, err := m.Encode(0, TextFormatCode, arg, []byte{}) if err != nil { return nil, err @@ -30,10 +25,6 @@ func convertSimpleArgument(m *pgtype.Map, arg any) (any, error) { } func encodeCopyValue(m *pgtype.Map, buf []byte, oid uint32, arg any) ([]byte, error) { - if anynil.Is(arg) { - return pgio.AppendInt32(buf, -1), nil - } - sp := len(buf) buf = pgio.AppendInt32(buf, -1) argBuf, err := m.Encode(oid, BinaryFormatCode, arg, buf) diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 1f72cdde187..05c7359e481 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -55,6 +55,10 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839 * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 + +
+ See changes to v1.16.x + * July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7) * zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829 @@ -93,6 +97,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748 * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747 * s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746 +
See changes to v1.15.x @@ -560,6 +565,8 @@ the stateless compress described below. For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). +To disable all assembly add `-tags=noasm`. This works across all packages. + # Stateless compression This package offers stateless compression as a special option for gzip/deflate. diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go index 2aa6a95a028..2754bac6f16 100644 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -51,7 +51,7 @@ func emitCopy(dst []byte, offset, length int) int { i := 0 // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The // threshold for this loop is a little higher (at 68 = 64 + 4), and the - // length emitted down below is is a little lower (at 60 = 64 - 4), because + // length emitted down below is a little lower (at 60 = 64 - 4), because // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as diff --git a/vendor/github.com/klauspost/compress/s2/writer.go b/vendor/github.com/klauspost/compress/s2/writer.go index 1253ea675c4..637c931474f 100644 --- a/vendor/github.com/klauspost/compress/s2/writer.go +++ b/vendor/github.com/klauspost/compress/s2/writer.go @@ -937,7 +937,7 @@ func WriterUncompressed() WriterOption { // WriterBlockSize allows to override the default block size. // Blocks will be this size or smaller. -// Minimum size is 4KB and and maximum size is 4MB. +// Minimum size is 4KB and maximum size is 4MB. // // Bigger blocks may give bigger throughput on systems with many cores, // and will increase compression slightly, but it will limit the possible diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 9f17ce601ff..03744fbc765 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -554,6 +554,9 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { if debugDecoder { printf("Compression modes: 0b%b", compMode) } + if compMode&3 != 0 { + return errors.New("corrupt block: reserved bits not zero") + } for i := uint(0); i < 3; i++ { mode := seqCompMode((compMode >> (6 - i*2)) & 3) if debugDecoder { diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go index 2cfe925ade5..32a7f401d5d 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -427,6 +427,16 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error { return nil } +// encodeRLE will encode an RLE block. +func (b *blockEnc) encodeRLE(val byte, length uint32) { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(length) + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, val) +} + // fuzzFseEncoder can be used to fuzz the FSE encoder. 
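The new encodeRLE helper above, together with the RLE detection added to encode(), enc_best, and enc_better in the following hunks, lets the encoder emit a compact RLE block when a block consists of a single repeated byte. A hedged round-trip sketch using this module's public API; the input size and encoder level are arbitrary illustrative choices.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// 1 MiB of a single repeated byte: the case the RLE path targets.
	src := bytes.Repeat([]byte{'a'}, 1<<20)

	enc, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(zstd.SpeedBestCompression))
	if err != nil {
		panic(err)
	}
	compressed := enc.EncodeAll(src, nil)
	enc.Close()

	dec, err := zstd.NewReader(nil)
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	out, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		panic(err)
	}
	// The exact compressed size depends on version and level, but a
	// repeated-byte input should collapse to a tiny frame.
	fmt.Println(len(compressed), bytes.Equal(src, out))
}
```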
func fuzzFseEncoder(data []byte) int { if len(data) > maxSequences || len(data) < 2 { @@ -479,6 +489,16 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { if len(b.sequences) == 0 { return b.encodeLits(b.literals, rawAllLits) } + if len(b.sequences) == 1 && len(org) > 0 && len(b.literals) <= 1 { + // Check common RLE cases. + seq := b.sequences[0] + if seq.litLen == uint32(len(b.literals)) && seq.offset-3 == 1 { + // Offset == 1 and 0 or 1 literals. + b.encodeRLE(org[0], b.sequences[0].matchLen+zstdMinMatch+seq.litLen) + return nil + } + } + // We want some difference to at least account for the headers. saved := b.size - len(b.literals) - (b.size >> 6) if saved < 16 { diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index f04aaa21eb8..bbca17234aa 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -82,7 +82,7 @@ var ( // can run multiple concurrent stateless decodes. It is even possible to // use stateless decodes while a stream is being decoded. // -// The Reset function can be used to initiate a new stream, which is will considerably +// The Reset function can be used to initiate a new stream, which will considerably // reduce the allocations normally caused by NewReader. func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { initPredefined() diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index 87f42879a87..4613724e9d1 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -135,8 +135,20 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { break } + // Add block to history s := e.addBlock(src) blk.size = len(src) + + // Check RLE first + if len(src) > zstdMinMatch { + ml := matchLen(src[1:], src) + if ml == len(src)-1 { + blk.literals = append(blk.literals, src[0]) + blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) + return + } + } + if len(src) < minNonLiteralBlockSize { blk.extraLits = len(src) blk.literals = blk.literals[:len(src)] diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index 20d25b0e052..a4f5bf91fc6 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -102,9 +102,20 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { e.cur = e.maxMatchOff break } - + // Add block to history s := e.addBlock(src) blk.size = len(src) + + // Check RLE first + if len(src) > zstdMinMatch { + ml := matchLen(src[1:], src) + if ml == len(src)-1 { + blk.literals = append(blk.literals, src[0]) + blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) + return + } + } + if len(src) < minNonLiteralBlockSize { blk.extraLits = len(src) blk.literals = blk.literals[:len(src)] diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/LICENSE similarity index 100% rename from vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE rename to vendor/github.com/matttproud/golang_protobuf_extensions/v2/LICENSE diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE 
b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/NOTICE similarity index 100% rename from vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE rename to vendor/github.com/matttproud/golang_protobuf_extensions/v2/NOTICE diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/.gitignore similarity index 100% rename from vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore rename to vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/.gitignore diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/Makefile similarity index 100% rename from vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile rename to vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/Makefile diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/decode.go similarity index 83% rename from vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go rename to vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/decode.go index 258c0636aac..7c08e564f14 100644 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/decode.go @@ -19,9 +19,10 @@ import ( "errors" "io" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" ) +// TODO: Give error package name prefix in next minor release. var errInvalidVarint = errors.New("invalid varint32 encountered") // ReadDelimited decodes a message from the provided length-delimited stream, @@ -36,6 +37,12 @@ var errInvalidVarint = errors.New("invalid varint32 encountered") // of the stream has been reached in doing so. In that case, any subsequent // calls return (0, io.EOF). func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { + // TODO: Consider allowing the caller to specify a decode buffer in the + // next major version. + + // TODO: Consider using error wrapping to annotate error state in pass- + // through cases in the next minor version. + // Per AbstractParser#parsePartialDelimitedFrom with // CodedInputStream#readRawVarint32. var headerBuf [binary.MaxVarintLen32]byte @@ -53,15 +60,14 @@ func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { if err != nil { return bytesRead, err } - // A Reader should not return (0, nil), but if it does, - // it should be treated as no-op (according to the - // Reader contract). So let's go on... + // A Reader should not return (0, nil); but if it does, it should + // be treated as no-op according to the Reader contract. continue } bytesRead += newBytesRead // Now present everything read so far to the varint decoder and // see if a varint can be decoded already. 
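The ReadDelimited hunk here (continued just below) replaces proto.DecodeVarint with the standard library's binary.Uvarint for the length prefix. The sketch below reproduces the same varint-length framing on a plain byte payload to show what the prefix looks like on the wire; it is an illustration of the framing only, not the pbutil implementation, and the helper names are invented.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// writeDelimited prefixes payload with its length as an unsigned varint,
// mirroring what WriteDelimited does for a marshaled message.
func writeDelimited(w io.Writer, payload []byte) (int, error) {
	var header [binary.MaxVarintLen32]byte
	n := binary.PutUvarint(header[:], uint64(len(payload)))
	if _, err := w.Write(header[:n]); err != nil {
		return 0, err
	}
	m, err := w.Write(payload)
	return n + m, err
}

// readDelimited reads the varint length prefix byte by byte with
// binary.Uvarint (as the patched ReadDelimited does) and then the payload.
// Real code should bound the decoded length before allocating.
func readDelimited(r io.Reader) ([]byte, error) {
	var header [binary.MaxVarintLen32]byte
	filled := 0
	for {
		if filled == len(header) {
			return nil, fmt.Errorf("length prefix is not a valid varint32")
		}
		if _, err := io.ReadFull(r, header[filled:filled+1]); err != nil {
			return nil, err
		}
		filled++
		if length, n := binary.Uvarint(header[:filled]); n > 0 {
			payload := make([]byte, length)
			_, err := io.ReadFull(r, payload)
			return payload, err
		}
	}
}

func main() {
	var buf bytes.Buffer
	if _, err := writeDelimited(&buf, []byte("hello")); err != nil {
		panic(err)
	}
	payload, err := readDelimited(&buf)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", payload)
}
```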
- messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead]) + messageLength, varIntBytes = binary.Uvarint(headerBuf[:bytesRead]) } messageBuf := make([]byte, messageLength) diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/doc.go similarity index 100% rename from vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go rename to vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/doc.go diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/encode.go similarity index 91% rename from vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go rename to vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/encode.go index 8fb59ad226f..e58dd9d2974 100644 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/encode.go @@ -18,7 +18,7 @@ import ( "encoding/binary" "io" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" ) // WriteDelimited encodes and dumps a message to the provided writer prefixed @@ -28,6 +28,9 @@ import ( // number of bytes written and any applicable error. This is roughly // equivalent to the companion Java API's MessageLite#writeDelimitedTo. func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { + // TODO: Consider allowing the caller to specify an encode buffer in the + // next major version. + buffer, err := proto.Marshal(m) if err != nil { return 0, err diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index 44222220a38..782817c3971 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,59 @@ +## 2.19.1 + +### Fixes +- update supported platforms for race conditions [63c8c30] +- [build] Allow custom name for binaries. [ff41e27] + +### Maintenance +- bump gomega [76f4e0c] +- Bump rexml from 3.2.6 to 3.2.8 in /docs (#1417) [b69c00d] +- Bump golang.org/x/sys from 0.20.0 to 0.21.0 (#1425) [f097741] + +## 2.19.0 + +### Features + +[Label Sets](https://onsi.github.io/ginkgo/#label-sets) allow for more expressive and flexible label filtering. 
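The 2.19.0 entry above refers to the key:value label-set filters whose parser and matchers (containsAny, containsAll, consistsOf, isSubsetOf, isEmpty) appear later in this patch under types/label_filter.go. A hedged sketch of how such labels might be attached to specs; the suite, label keys, and values are invented for illustration, and the matching CLI invocation would be something along the lines of `ginkgo --label-filter='cloud: containsAny {gcp, aws}'`.

```go
package cloud_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

func TestCloud(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Cloud Suite")
}

// Labels of the form "key:value" participate in the new set operations, e.g.
// `cloud: consistsOf {gcp, aws}` or `cloud: isEmpty` in --label-filter.
var _ = Describe("provisioning", Label("cloud:gcp", "cloud:aws"), func() {
	It("creates a bucket", Label("cloud:gcp"), func() {
		Expect(true).To(BeTrue())
	})

	It("is cloud-agnostic", func() {
		Expect(1 + 1).To(Equal(2))
	})
})
```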
+ +## 2.18.0 + +### Features +- Add --slience-skips and --force-newlines [f010b65] +- fail when no tests were run and --fail-on-empty was set [d80eebe] + +### Fixes +- Fix table entry context edge case [42013d6] + +### Maintenance +- Bump golang.org/x/tools from 0.20.0 to 0.21.0 (#1406) [fcf1fd7] +- Bump github.com/onsi/gomega from 1.33.0 to 1.33.1 (#1399) [8bb14fd] +- Bump golang.org/x/net from 0.24.0 to 0.25.0 (#1407) [04bfad7] + +## 2.17.3 + +### Fixes +`ginkgo watch` now ignores hidden files [bde6e00] + +## 2.17.2 + +### Fixes +- fix: close files [32259c8] +- fix github output log level for skipped specs [780e7a3] + +### Maintenance +- Bump github.com/google/pprof [d91fe4e] +- Bump github.com/go-task/slim-sprig to v3 [8cb662e] +- Bump golang.org/x/net in /integration/_fixtures/version_mismatch_fixture (#1391) [3134422] +- Bump github-pages from 230 to 231 in /docs (#1384) [eca81b4] +- Bump golang.org/x/tools from 0.19.0 to 0.20.0 (#1383) [760def8] +- Bump golang.org/x/net from 0.23.0 to 0.24.0 (#1381) [4ce33f4] +- Fix test for gomega version bump [f2fcd97] +- Bump github.com/onsi/gomega from 1.30.0 to 1.33.0 (#1390) [fd622d2] +- Bump golang.org/x/tools from 0.17.0 to 0.19.0 (#1368) [5474a26] +- Bump github-pages from 229 to 230 in /docs (#1359) [e6d1170] +- Bump google.golang.org/protobuf from 1.28.0 to 1.33.0 (#1374) [7f447b2] +- Bump golang.org/x/net from 0.20.0 to 0.23.0 (#1380) [f15239a] + ## 2.17.1 ### Fixes diff --git a/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md b/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md index 1da92fe7ee2..80de566a522 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md +++ b/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md @@ -6,8 +6,10 @@ Your contributions to Ginkgo are essential for its long-term maintenance and imp - Ensure adequate test coverage: - When adding to the Ginkgo library, add unit and/or integration tests (under the `integration` folder). - When adding to the Ginkgo CLI, note that there are very few unit tests. Please add an integration test. -- Make sure all the tests succeed via `ginkgo -r -p` -- Vet your changes via `go vet ./...` -- Update the documentation. Ginkgo uses `godoc` comments and documentation in `docs/index.md`. You can run `bundle exec jekyll serve` in the `docs` directory to preview your changes. +- Run `make` or: + - Install ginkgo locally via `go install ./...` + - Make sure all the tests succeed via `ginkgo -r -p` + - Vet your changes via `go vet ./...` +- Update the documentation. Ginkgo uses `godoc` comments and documentation in `docs/index.md`. You can run `bundle && bundle exec jekyll serve` in the `docs` directory to preview your changes. -Thanks for supporting Ginkgo! \ No newline at end of file +Thanks for supporting Ginkgo! diff --git a/vendor/github.com/onsi/ginkgo/v2/Makefile b/vendor/github.com/onsi/ginkgo/v2/Makefile new file mode 100644 index 00000000000..cb099aff950 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/Makefile @@ -0,0 +1,11 @@ +# default task since it's first +.PHONY: all +all: vet test + +.PHONY: test +test: + go run github.com/onsi/ginkgo/v2/ginkgo -r -p + +.PHONY: vet +vet: + go vet ./... 
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go index 5db5d1a7bfd..fd172608437 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go @@ -2,6 +2,8 @@ package build import ( "fmt" + "os" + "path" "github.com/onsi/ginkgo/v2/ginkgo/command" "github.com/onsi/ginkgo/v2/ginkgo/internal" @@ -53,7 +55,18 @@ func buildSpecs(args []string, cliConfig types.CLIConfig, goFlagsConfig types.Go if suite.State.Is(internal.TestSuiteStateFailedToCompile) { fmt.Println(suite.CompilationError.Error()) } else { - fmt.Printf("Compiled %s.test\n", suite.PackageName) + if len(goFlagsConfig.O) == 0 { + goFlagsConfig.O = path.Join(suite.Path, suite.PackageName+".test") + } else { + stat, err := os.Stat(goFlagsConfig.O) + if err != nil { + panic(err) + } + if stat.IsDir() { + goFlagsConfig.O += "/" + suite.PackageName + ".test" + } + } + fmt.Printf("Compiled %s\n", goFlagsConfig.O) } } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go index 73aff0b7a18..b2dc59be66f 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go @@ -7,7 +7,7 @@ import ( "os" "text/template" - sprig "github.com/go-task/slim-sprig" + sprig "github.com/go-task/slim-sprig/v3" "github.com/onsi/ginkgo/v2/ginkgo/command" "github.com/onsi/ginkgo/v2/ginkgo/internal" "github.com/onsi/ginkgo/v2/types" diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go index be01dec979d..cf3b7cb6d6d 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go @@ -10,7 +10,7 @@ import ( "strings" "text/template" - sprig "github.com/go-task/slim-sprig" + sprig "github.com/go-task/slim-sprig/v3" "github.com/onsi/ginkgo/v2/ginkgo/command" "github.com/onsi/ginkgo/v2/ginkgo/internal" "github.com/onsi/ginkgo/v2/types" @@ -174,6 +174,7 @@ func moduleName(modRoot string) string { if err != nil { return "" } + defer modFile.Close() mod := make([]byte, 128) _, err = modFile.Read(mod) diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go index 86da7340d17..48827cc5efc 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go @@ -25,6 +25,18 @@ func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite return suite } + if len(goFlagsConfig.O) > 0 { + userDefinedPath, err := filepath.Abs(goFlagsConfig.O) + if err != nil { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compute compilation target path %s:\n%s", goFlagsConfig.O, err.Error()) + return suite + } + path = userDefinedPath + } + + goFlagsConfig.O = path + ginkgoInvocationPath, _ := os.Getwd() ginkgoInvocationPath, _ = filepath.Abs(ginkgoInvocationPath) packagePath := suite.AbsPath() @@ -34,7 +46,7 @@ func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite suite.CompilationError = fmt.Errorf("Failed to get relative path from package to the current working directory:\n%s", 
err.Error()) return suite } - args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, path, "./", pathToInvocationPath) + args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, "./", pathToInvocationPath) if err != nil { suite.State = TestSuiteStateFailedToCompile suite.CompilationError = fmt.Errorf("Failed to generate go test compile flags:\n%s", err.Error()) diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go index 5f35864ddba..8e16d2bb034 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go @@ -161,6 +161,7 @@ func MergeAndCleanupCoverProfiles(profiles []string, destination string) error { if err != nil { return err } + defer dst.Close() err = DumpCoverProfiles(merged, dst) if err != nil { return err @@ -196,6 +197,7 @@ func MergeProfiles(profilePaths []string, destination string) error { return fmt.Errorf("Could not open profile: %s\n%s", profilePath, err.Error()) } prof, err := profile.Parse(proFile) + _ = proFile.Close() if err != nil { return fmt.Errorf("Could not parse profile: %s\n%s", profilePath, err.Error()) } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go index 17d052bdc3c..0e6ae1f2903 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "regexp" + "strings" "time" ) @@ -79,6 +80,10 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti continue } + if isHiddenFile(info) { + continue + } + if goTestRegExp.MatchString(info.Name()) { testHash += p.hashForFileInfo(info) if info.ModTime().After(testModifiedTime) { @@ -103,6 +108,10 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti return } +func isHiddenFile(info os.FileInfo) bool { + return strings.HasPrefix(info.Name(), ".") || strings.HasPrefix(info.Name(), "_") +} + func (p *PackageHash) hashForFileInfo(info os.FileInfo) string { return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano()) } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go index a994ee3d67e..a3c9e6bf18f 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go @@ -489,10 +489,15 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s newGroup(suite).run(specs.AtIndices(groupedSpecIndices[groupedSpecIdx])) } - if specs.HasAnySpecsMarkedPending() && suite.config.FailOnPending { + if suite.config.FailOnPending && specs.HasAnySpecsMarkedPending() { suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected pending specs and --fail-on-pending is set") suite.report.SuiteSucceeded = false } + + if suite.config.FailOnEmpty && specs.CountWithoutSkip() == 0 { + suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected no specs ran and --fail-on-empty is set") + suite.report.SuiteSucceeded = false + } } if ranBeforeSuite { diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go index 4026859ec39..480730486af 100644 --- 
a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -202,6 +202,11 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) { v := r.conf.Verbosity() inParallel := report.RunningInParallel + //should we completely omit this spec? + if report.State.Is(types.SpecStateSkipped) && r.conf.SilenceSkips { + return + } + header := r.specDenoter if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { header = fmt.Sprintf("[%s]", report.LeafNodeType) @@ -278,9 +283,12 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) { } } - // If we have no content to show, jsut emit the header and return + // If we have no content to show, just emit the header and return if !reportHasContent { r.emit(r.f(highlightColor + header + "{{/}}")) + if r.conf.ForceNewlines { + r.emit("\n") + } return } @@ -419,7 +427,11 @@ func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failur highlightColor := r.highlightColorForState(state) r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message)) if r.conf.GithubOutput { - r.emitBlock(r.fi(indent, "::error file=%s,line=%d::%s %s", failure.Location.FileName, failure.Location.LineNumber, failure.FailureNodeType, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + level := "error" + if state.Is(types.SpecStateSkipped) { + level = "notice" + } + r.emitBlock(r.fi(indent, "::%s file=%s,line=%d::%s %s", level, failure.Location.FileName, failure.Location.LineNumber, failure.FailureNodeType, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) } else { r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) } diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go index 43244a9bd51..562e0f62ba2 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go @@ -177,6 +177,7 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit {"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")}, {"SkipFiles", strings.Join(report.SuiteConfig.SkipFiles, ";")}, {"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)}, + {"FailOnEmpty", fmt.Sprintf("%t", report.SuiteConfig.FailOnEmpty)}, {"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)}, {"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)}, {"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)}, @@ -324,6 +325,7 @@ func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error) continue } err = xml.NewDecoder(f).Decode(&report) + _ = f.Close() if err != nil { messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error())) continue diff --git a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go index a3aef821bff..c7de7a8be09 100644 --- a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go @@ -269,11 +269,15 @@ func generateTable(description string, isSubtree bool, args ...interface{}) { internalNodeArgs = append(internalNodeArgs, entry.decorations...) hasContext := false - if internalBodyType.NumIn() > 0. 
{ + if internalBodyType.NumIn() > 0 { if internalBodyType.In(0).Implements(specContextType) { hasContext = true - } else if internalBodyType.In(0).Implements(contextType) && (len(entry.parameters) == 0 || !reflect.TypeOf(entry.parameters[0]).Implements(contextType)) { + } else if internalBodyType.In(0).Implements(contextType) { hasContext = true + if len(entry.parameters) > 0 && reflect.TypeOf(entry.parameters[0]) != nil && reflect.TypeOf(entry.parameters[0]).Implements(contextType) { + // we allow you to pass in a non-nil context + hasContext = false + } } } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go index cef273ee1ff..9ca408eae77 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/config.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -25,6 +25,7 @@ type SuiteConfig struct { SkipFiles []string LabelFilter string FailOnPending bool + FailOnEmpty bool FailFast bool FlakeAttempts int MustPassRepeatedly int @@ -90,6 +91,8 @@ type ReporterConfig struct { FullTrace bool ShowNodeEvents bool GithubOutput bool + SilenceSkips bool + ForceNewlines bool JSONReport string JUnitReport string @@ -216,6 +219,7 @@ type GoFlagsConfig struct { ToolExec string Work bool X bool + O string } func NewDefaultGoFlagsConfig() GoFlagsConfig { @@ -275,6 +279,8 @@ var SuiteConfigFlags = GinkgoFlags{ Usage: "If set, ginkgo will stop running a test suite after a failure occurs."}, {KeyPath: "S.FlakeAttempts", Name: "flake-attempts", SectionKey: "failure", UsageDefaultValue: "0 - failed tests are not retried", DeprecatedName: "flakeAttempts", DeprecatedDocLink: "changed-command-line-flags", Usage: "Make up to this many attempts to run each spec. If any of the attempts succeed, the suite will not be failed."}, + {KeyPath: "S.FailOnEmpty", Name: "fail-on-empty", SectionKey: "failure", + Usage: "If set, ginkgo will mark the test suite as failed if no specs are run."}, {KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v."}, @@ -334,6 +340,10 @@ var ReporterConfigFlags = GinkgoFlags{ Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"}, {KeyPath: "R.GithubOutput", Name: "github-output", SectionKey: "output", Usage: "If set, default reporter prints easier to manage output in Github Actions."}, + {KeyPath: "R.SilenceSkips", Name: "silence-skips", SectionKey: "output", + Usage: "If set, default reporter will not print out skipped tests."}, + {KeyPath: "R.ForceNewlines", Name: "force-newlines", SectionKey: "output", + Usage: "If set, default reporter will ensure a newline appears after each test."}, {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output", Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."}, @@ -502,7 +512,7 @@ var GinkgoCLIWatchFlags = GinkgoFlags{ // GoBuildFlags provides flags for the Ginkgo CLI build, run, and watch commands that capture go's build-time flags. These are passed to go test -c by the ginkgo CLI var GoBuildFlags = GinkgoFlags{ {KeyPath: "Go.Race", Name: "race", SectionKey: "code-and-coverage-analysis", - Usage: "enable data race detection. Supported only on linux/amd64, freebsd/amd64, darwin/amd64, windows/amd64, linux/ppc64le and linux/arm64 (only for 48-bit VMA)."}, + Usage: "enable data race detection. 
Supported on linux/amd64, linux/ppc64le, linux/arm64, linux/s390x, freebsd/amd64, netbsd/amd64, darwin/amd64, darwin/arm64, and windows/amd64."}, {KeyPath: "Go.Vet", Name: "vet", UsageArgument: "list", SectionKey: "code-and-coverage-analysis", Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty, "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`}, {KeyPath: "Go.Cover", Name: "cover", SectionKey: "code-and-coverage-analysis", @@ -552,6 +562,8 @@ var GoBuildFlags = GinkgoFlags{ Usage: "print the name of the temporary work directory and do not delete it when exiting."}, {KeyPath: "Go.X", Name: "x", SectionKey: "go-build", Usage: "print the commands."}, + {KeyPath: "Go.O", Name: "o", SectionKey: "go-build", + Usage: "output binary path (including name)."}, } // GoRunFlags provides flags for the Ginkgo CLI run, and watch commands that capture go's run-time flags. These are passed to the compiled test binary by the ginkgo CLI @@ -605,7 +617,7 @@ func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsCo } // GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test -func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, packageToBuild string, pathToInvocationPath string) ([]string, error) { +func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, packageToBuild string, pathToInvocationPath string) ([]string, error) { // if the user has set the CoverProfile run-time flag make sure to set the build-time cover flag to make sure // the built test binary can generate a coverprofile if goFlagsConfig.CoverProfile != "" { @@ -628,7 +640,7 @@ func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, goFlagsConfig.CoverPkg = strings.Join(adjustedCoverPkgs, ",") } - args := []string{"test", "-c", "-o", destination, packageToBuild} + args := []string{"test", "-c", packageToBuild} goArgs, err := GenerateFlagArgs( GoBuildFlags, map[string]interface{}{ diff --git a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go index b0d3b651e7e..7fdc8aa23f3 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go @@ -45,6 +45,83 @@ func orAction(a, b LabelFilter) LabelFilter { return func(labels []string) bool { return a(labels) || b(labels) } } +func labelSetFor(key string, labels []string) map[string]bool { + key = strings.ToLower(strings.TrimSpace(key)) + out := map[string]bool{} + for _, label := range labels { + components := strings.SplitN(label, ":", 2) + if len(components) < 2 { + continue + } + if key == strings.ToLower(strings.TrimSpace(components[0])) { + out[strings.ToLower(strings.TrimSpace(components[1]))] = true + } + } + + return out +} + +func isEmptyLabelSetAction(key string) LabelFilter { + return func(labels []string) bool { + return len(labelSetFor(key, labels)) == 0 + } +} + +func containsAnyLabelSetAction(key string, expectedValues []string) LabelFilter { + return func(labels []string) bool { + set := labelSetFor(key, labels) + for _, value := range expectedValues { + if set[value] { + return true + } + } + return false + } +} + +func containsAllLabelSetAction(key string, 
expectedValues []string) LabelFilter { + return func(labels []string) bool { + set := labelSetFor(key, labels) + for _, value := range expectedValues { + if !set[value] { + return false + } + } + return true + } +} + +func consistsOfLabelSetAction(key string, expectedValues []string) LabelFilter { + return func(labels []string) bool { + set := labelSetFor(key, labels) + if len(set) != len(expectedValues) { + return false + } + for _, value := range expectedValues { + if !set[value] { + return false + } + } + return true + } +} + +func isSubsetOfLabelSetAction(key string, expectedValues []string) LabelFilter { + expectedSet := map[string]bool{} + for _, value := range expectedValues { + expectedSet[value] = true + } + return func(labels []string) bool { + set := labelSetFor(key, labels) + for value := range set { + if !expectedSet[value] { + return false + } + } + return true + } +} + type lfToken uint const ( @@ -58,6 +135,9 @@ const ( lfTokenOr lfTokenRegexp lfTokenLabel + lfTokenSetKey + lfTokenSetOperation + lfTokenSetArgument lfTokenEOF ) @@ -71,6 +151,8 @@ func (l lfToken) Precedence() int { return 2 case lfTokenNot: return 3 + case lfTokenSetOperation: + return 4 } return -1 } @@ -93,6 +175,12 @@ func (l lfToken) String() string { return "/regexp/" case lfTokenLabel: return "label" + case lfTokenSetKey: + return "set_key" + case lfTokenSetOperation: + return "set_operation" + case lfTokenSetArgument: + return "set_argument" case lfTokenEOF: return "EOF" } @@ -148,6 +236,35 @@ func (tn *treeNode) constructLabelFilter(input string) (LabelFilter, error) { return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("RegExp compilation error: %s", err)) } return matchLabelRegexAction(re), nil + case lfTokenSetOperation: + tokenSetOperation := strings.ToLower(tn.value) + if tokenSetOperation == "isempty" { + return isEmptyLabelSetAction(tn.leftNode.value), nil + } + if tn.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Set operation '%s' is missing an argument.", tn.value)) + } + + rawValues := strings.Split(tn.rightNode.value, ",") + values := make([]string, len(rawValues)) + for i := range rawValues { + values[i] = strings.ToLower(strings.TrimSpace(rawValues[i])) + if strings.ContainsAny(values[i], "&|!,()/") { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, fmt.Sprintf("Invalid label value '%s' in set operation argument.", values[i])) + } else if values[i] == "" { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, "Empty label value in set operation argument.") + } + } + switch tokenSetOperation { + case "containsany": + return containsAnyLabelSetAction(tn.leftNode.value, values), nil + case "containsall": + return containsAllLabelSetAction(tn.leftNode.value, values), nil + case "consistsof": + return consistsOfLabelSetAction(tn.leftNode.value, values), nil + case "issubsetof": + return isSubsetOfLabelSetAction(tn.leftNode.value, values), nil + } } if tn.rightNode == nil { @@ -203,7 +320,17 @@ func (tn *treeNode) toString(indent int) string { return out } +var validSetOperations = map[string]string{ + "containsany": "containsAny", + "containsall": "containsAll", + "consistsof": "consistsOf", + "issubsetof": "isSubsetOf", + "isempty": "isEmpty", +} + func tokenize(input string) func() (*treeNode, error) { + lastToken := lfTokenInvalid + lastValue := "" runes, i := []rune(input), 0 peekIs := func(r rune) bool { @@ -233,6 +360,53 
@@ func tokenize(input string) func() (*treeNode, error) { } node := &treeNode{location: i} + defer func() { + lastToken = node.token + lastValue = node.value + }() + + if lastToken == lfTokenSetKey { + //we should get a valid set operation next + value, n := consumeUntil(" )") + if validSetOperations[strings.ToLower(value)] == "" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, fmt.Sprintf("Invalid set operation '%s'.", value)) + } + i += n + node.token, node.value = lfTokenSetOperation, value + return node, nil + } + if lastToken == lfTokenSetOperation { + //we should get an argument next, if we aren't isempty + var arg = "" + origI := i + if runes[i] == '{' { + i += 1 + value, n := consumeUntil("}") + if i+n >= len(runes) { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-1, "Missing closing '}' in set operation argument?") + } + i += n + 1 + arg = value + } else { + value, n := consumeUntil("&|!,()/") + i += n + arg = strings.TrimSpace(value) + } + if strings.ToLower(lastValue) == "isempty" && arg != "" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("isEmpty does not take arguments, was passed '%s'.", arg)) + } + if arg == "" && strings.ToLower(lastValue) != "isempty" { + if i < len(runes) && runes[i] == '/' { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, "Set operations do not support regular expressions.") + } else { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("Set operation '%s' requires an argument.", lastValue)) + } + } + // note that we sent an empty SetArgument token if we are isempty + node.token, node.value = lfTokenSetArgument, arg + return node, nil + } + switch runes[i] { case '&': if !peekIs('&') { @@ -264,8 +438,38 @@ func tokenize(input string) func() (*treeNode, error) { i += n + 1 node.token, node.value = lfTokenRegexp, value default: - value, n := consumeUntil("&|!,()/") + value, n := consumeUntil("&|!,()/:") i += n + value = strings.TrimSpace(value) + + //are we the beginning of a set operation? + if i < len(runes) && runes[i] == ':' { + if peekIs(' ') { + if value == "" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set key.") + } + i += 1 + //we are the beginning of a set operation + node.token, node.value = lfTokenSetKey, value + return node, nil + } + additionalValue, n := consumeUntil("&|!,()/") + additionalValue = strings.TrimSpace(additionalValue) + if additionalValue == ":" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set operation.") + } + i += n + value += additionalValue + } + + valueToCheckForSetOperation := strings.ToLower(value) + for setOperation := range validSetOperations { + idx := strings.Index(valueToCheckForSetOperation, " "+setOperation) + if idx > 0 { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-n+idx+1, fmt.Sprintf("Looks like you are using the set operator '%s' but did not provide a set key. Did you forget the ':'?", validSetOperations[setOperation])) + } + } + node.token, node.value = lfTokenLabel, strings.TrimSpace(value) } return node, nil @@ -307,7 +511,7 @@ LOOP: switch node.token { case lfTokenEOF: break LOOP - case lfTokenLabel, lfTokenRegexp: + case lfTokenLabel, lfTokenRegexp, lfTokenSetKey: if current.rightNode != nil { return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found two adjacent labels. 
You need an operator between them.") } @@ -326,6 +530,18 @@ LOOP: node.setLeftNode(nodeToStealFrom.rightNode) nodeToStealFrom.setRightNode(node) current = node + case lfTokenSetOperation: + if current.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Set operation '%s' missing left hand operand.", node.value)) + } + node.setLeftNode(current.rightNode) + current.setRightNode(node) + current = node + case lfTokenSetArgument: + if current.rightNode != nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Unexpected set argument '%s'.", node.token)) + } + current.setRightNode(node) case lfTokenCloseGroup: firstUnmatchedOpenNode := current.firstUnmatchedOpenNode() if firstUnmatchedOpenNode == nil { @@ -354,5 +570,14 @@ func ValidateAndCleanupLabel(label string, cl CodeLocation) (string, error) { if strings.ContainsAny(out, "&|!,()/") { return "", GinkgoErrors.InvalidLabel(label, cl) } + if out[0] == ':' { + return "", GinkgoErrors.InvalidLabel(label, cl) + } + if strings.Contains(out, ":") { + components := strings.SplitN(out, ":", 2) + if len(components) < 2 || components[1] == "" { + return "", GinkgoErrors.InvalidLabel(label, cl) + } + } return out, nil } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index 851d42b456b..f2ee0501d51 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.17.1" +const VERSION = "2.19.1" diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index 89230f199d0..c6c34d65dc8 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,33 @@ +## 1.34.1 + +### Maintenance +- Use slices from exp/slices to keep golang 1.20 compat [5e71dcd] + +## 1.34.0 + +### Features +- Add RoundTripper method to ghttp.Server [c549e0d] + +### Fixes +- fix incorrect handling of nil slices in HaveExactElements (fixes #771) [878940c] +- issue_765 - fixed bug in Hopcroft-Karp algorithm [ebadb67] + +### Maintenance +- bump ginkgo [8af2ece] +- Fix typo in docs [123a071] +- Bump github.com/onsi/ginkgo/v2 from 2.17.2 to 2.17.3 (#756) [0e69083] +- Bump google.golang.org/protobuf from 1.33.0 to 1.34.1 (#755) [2675796] +- Bump golang.org/x/net from 0.24.0 to 0.25.0 (#754) [4160c0f] +- Bump github-pages from 230 to 231 in /docs (#748) [892c303] + +## 1.33.1 + +### Fixes +- fix confusing eventually docs [3a66379] + +### Maintenance +- Bump github.com/onsi/ginkgo/v2 from 2.17.1 to 2.17.2 [e9bc35a] + ## 1.33.0 ### Features diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index 1980a63c7f1..2546ccceb0b 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.33.0" +const GOMEGA_VERSION = "1.34.1" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). 
@@ -372,11 +372,11 @@ You can ensure that you get a number of consecutive successful tries before succ Finally, in addition to passing timeouts and a context to Eventually you can be more explicit with Eventually's chaining configuration methods: - Eventually(..., "1s", "2s", ctx).Should(...) + Eventually(..., "10s", "2s", ctx).Should(...) is equivalent to - Eventually(...).WithTimeout(time.Second).WithPolling(2*time.Second).WithContext(ctx).Should(...) + Eventually(...).WithTimeout(10*time.Second).WithPolling(2*time.Second).WithContext(ctx).Should(...) */ func Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion { ensureDefaultGomegaIsConfigured() diff --git a/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go b/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go index dca5b944672..5a236d7d699 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go +++ b/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go @@ -30,15 +30,18 @@ func (matcher *HaveExactElementsMatcher) Match(actual interface{}) (success bool lenMatchers := len(matchers) lenValues := len(values) + success = true for i := 0; i < lenMatchers || i < lenValues; i++ { if i >= lenMatchers { matcher.extraIndex = i + success = false continue } if i >= lenValues { matcher.missingIndex = i + success = false return } @@ -49,15 +52,17 @@ func (matcher *HaveExactElementsMatcher) Match(actual interface{}) (success bool index: i, failure: err.Error(), }) + success = false } else if !match { matcher.mismatchFailures = append(matcher.mismatchFailures, mismatchFailure{ index: i, failure: elemMatcher.FailureMessage(values[i]), }) + success = false } } - return matcher.missingIndex+matcher.extraIndex+len(matcher.mismatchFailures) == 0, nil + return success, nil } func (matcher *HaveExactElementsMatcher) FailureMessage(actual interface{}) (message string) { diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go index 1c54edd8f17..4339acc6418 100644 --- a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go @@ -1,6 +1,8 @@ package bipartitegraph import ( + "golang.org/x/exp/slices" + . "github.com/onsi/gomega/matchers/support/goraph/edge" . "github.com/onsi/gomega/matchers/support/goraph/node" "github.com/onsi/gomega/matchers/support/goraph/util" @@ -157,6 +159,11 @@ func (bg *BipartiteGraph) createSLAPGuideLayers(matching EdgeSet) (guideLayers [ if len(currentLayer) == 0 { return []NodeOrderedSet{} } + if done { // if last layer - into last layer must be only 'free' nodes + currentLayer = slices.DeleteFunc(currentLayer, func(in Node) bool { + return !matching.Free(in) + }) + } guideLayers = append(guideLayers, currentLayer) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go index 3aa8d0590ba..b22d862fbc6 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go @@ -22,7 +22,7 @@ import "github.com/prometheus/client_golang/prometheus" // Prometheus metrics. 
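The have_exact_elements.go change above tracks success explicitly so that a nil or too-short actual no longer slips through when the first mismatch is at index 0. A small hedged usage sketch outside a Ginkgo suite, using gomega's standalone NewWithT form; the values are arbitrary.

```go
package example_test

import (
	"testing"

	"github.com/onsi/gomega"
)

func TestHaveExactElements(t *testing.T) {
	g := gomega.NewWithT(t)

	// Same elements, same order, same length: matches.
	g.Expect([]string{"a", "b", "c"}).To(gomega.HaveExactElements("a", "b", "c"))

	// A nil actual is missing its element at index 0; with the fix above this
	// counts as a mismatch rather than an accidental pass.
	g.Expect([]string(nil)).NotTo(gomega.HaveExactElements("a"))
}
```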
Note that the data models of expvar and Prometheus are // fundamentally different, and that the expvar Collector is inherently slower // than native Prometheus metrics. Thus, the expvar Collector is probably great -// for experiments and prototying, but you should seriously consider a more +// for experiments and prototyping, but you should seriously consider a more // direct implementation of Prometheus metrics for monitoring production // systems. // diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go index 2f5616894e7..bcfa4fa10e0 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go @@ -132,16 +132,19 @@ type GoCollectionOption uint32 const ( // GoRuntimeMemStatsCollection represents the metrics represented by runtime.MemStats structure. - // Deprecated. Use WithGoCollectorMemStatsMetricsDisabled() function to disable those metrics in the collector. + // + // Deprecated: Use WithGoCollectorMemStatsMetricsDisabled() function to disable those metrics in the collector. GoRuntimeMemStatsCollection GoCollectionOption = 1 << iota // GoRuntimeMetricsCollection is the new set of metrics represented by runtime/metrics package. - // Deprecated. Use WithGoCollectorRuntimeMetrics(GoRuntimeMetricsRule{Matcher: regexp.MustCompile("/.*")}) + // + // Deprecated: Use WithGoCollectorRuntimeMetrics(GoRuntimeMetricsRule{Matcher: regexp.MustCompile("/.*")}) // function to enable those metrics in the collector. GoRuntimeMetricsCollection ) // WithGoCollections allows enabling different collections for Go collector on top of base metrics. -// Deprecated. Use WithGoCollectorRuntimeMetrics() and WithGoCollectorMemStatsMetricsDisabled() instead to control metrics. +// +// Deprecated: Use WithGoCollectorRuntimeMetrics() and WithGoCollectorMemStatsMetricsDisabled() instead to control metrics. func WithGoCollections(flags GoCollectionOption) func(options *internal.GoCollectorOptions) { return func(options *internal.GoCollectorOptions) { if flags&GoRuntimeMemStatsCollection == 0 { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go index 62de4dc59aa..4ce84e7a80e 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -20,6 +20,7 @@ import ( "time" dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/types/known/timestamppb" ) // Counter is a Metric that represents a single numerical value that only ever @@ -66,7 +67,7 @@ type CounterVecOpts struct { CounterOpts // VariableLabels are used to partition the metric vector by the given set - // of labels. Each label value will be constrained with the optional Contraint + // of labels. Each label value will be constrained with the optional Constraint // function, if provided. VariableLabels ConstrainableLabels } @@ -90,8 +91,12 @@ func NewCounter(opts CounterOpts) Counter { nil, opts.ConstLabels, ) - result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now} + if opts.now == nil { + opts.now = time.Now + } + result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: opts.now} result.init(result) // Init self-collection. 
+ result.createdTs = timestamppb.New(opts.now()) return result } @@ -106,10 +111,12 @@ type counter struct { selfCollector desc *Desc + createdTs *timestamppb.Timestamp labelPairs []*dto.LabelPair exemplar atomic.Value // Containing nil or a *dto.Exemplar. - now func() time.Time // To mock out time.Now() for testing. + // now is for testing purposes, by default it's time.Now. + now func() time.Time } func (c *counter) Desc() *Desc { @@ -159,8 +166,7 @@ func (c *counter) Write(out *dto.Metric) error { exemplar = e.(*dto.Exemplar) } val := c.get() - - return populateMetric(CounterValue, val, c.labelPairs, exemplar, out) + return populateMetric(CounterValue, val, c.labelPairs, exemplar, out, c.createdTs) } func (c *counter) updateExemplar(v float64, l Labels) { @@ -200,13 +206,17 @@ func (v2) NewCounterVec(opts CounterVecOpts) *CounterVec { opts.VariableLabels, opts.ConstLabels, ) + if opts.now == nil { + opts.now = time.Now + } return &CounterVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - if len(lvs) != len(desc.variableLabels) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), lvs)) + if len(lvs) != len(desc.variableLabels.names) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs)) } - result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now} + result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: opts.now} result.init(result) // Init self-collection. + result.createdTs = timestamppb.New(opts.now()) return result }), } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index deedc2dfbe7..68ffe3c2480 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -52,7 +52,7 @@ type Desc struct { constLabelPairs []*dto.LabelPair // variableLabels contains names of labels and normalization function for // which the metric maintains variable values. - variableLabels ConstrainedLabels + variableLabels *compiledLabels // id is a hash of the values of the ConstLabels and fqName. This // must be unique among all registered descriptors and can therefore be // used as an identifier of the descriptor. @@ -93,7 +93,7 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const d := &Desc{ fqName: fqName, help: help, - variableLabels: variableLabels.constrainedLabels(), + variableLabels: variableLabels.compile(), } if !model.IsValidMetricName(model.LabelValue(fqName)) { d.err = fmt.Errorf("%q is not a valid metric name", fqName) @@ -103,7 +103,7 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const // their sorted label names) plus the fqName (at position 0). labelValues := make([]string, 1, len(constLabels)+1) labelValues[0] = fqName - labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels)) + labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels.names)) labelNameSet := map[string]struct{}{} // First add only the const label names and sort them... for labelName := range constLabels { @@ -128,13 +128,13 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const // Now add the variable label names, but prefix them with something that // cannot be in a regular label name. That prevents matching the label // dimension with a different mix between preset and variable labels. 
- for _, label := range d.variableLabels { - if !checkLabelName(label.Name) { - d.err = fmt.Errorf("%q is not a valid label name for metric %q", label.Name, fqName) + for _, label := range d.variableLabels.names { + if !checkLabelName(label) { + d.err = fmt.Errorf("%q is not a valid label name for metric %q", label, fqName) return d } - labelNames = append(labelNames, "$"+label.Name) - labelNameSet[label.Name] = struct{}{} + labelNames = append(labelNames, "$"+label) + labelNameSet[label] = struct{}{} } if len(labelNames) != len(labelNameSet) { d.err = fmt.Errorf("duplicate label names in constant and variable labels for metric %q", fqName) @@ -189,11 +189,19 @@ func (d *Desc) String() string { fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), ) } + vlStrings := make([]string, 0, len(d.variableLabels.names)) + for _, vl := range d.variableLabels.names { + if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil { + vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl)) + } else { + vlStrings = append(vlStrings, vl) + } + } return fmt.Sprintf( - "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", + "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: {%s}}", d.fqName, d.help, strings.Join(lpStrings, ","), - d.variableLabels, + strings.Join(vlStrings, ","), ) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go index c41ab37f3bb..de5a8562931 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go @@ -48,7 +48,7 @@ func (e *expvarCollector) Collect(ch chan<- Metric) { continue } var v interface{} - labels := make([]string, len(desc.variableLabels)) + labels := make([]string, len(desc.variableLabels.names)) if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil { ch <- NewInvalidMetric(desc, err) continue diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go index f1ea6c76f75..dd2eac94067 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -62,7 +62,7 @@ type GaugeVecOpts struct { GaugeOpts // VariableLabels are used to partition the metric vector by the given set - // of labels. Each label value will be constrained with the optional Contraint + // of labels. Each label value will be constrained with the optional Constraint // function, if provided. 
VariableLabels ConstrainableLabels } @@ -135,7 +135,7 @@ func (g *gauge) Sub(val float64) { func (g *gauge) Write(out *dto.Metric) error { val := math.Float64frombits(atomic.LoadUint64(&g.valBits)) - return populateMetric(GaugeValue, val, g.labelPairs, nil, out) + return populateMetric(GaugeValue, val, g.labelPairs, nil, out, nil) } // GaugeVec is a Collector that bundles a set of Gauges that all share the same @@ -166,8 +166,8 @@ func (v2) NewGaugeVec(opts GaugeVecOpts) *GaugeVec { ) return &GaugeVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - if len(lvs) != len(desc.variableLabels) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), lvs)) + if len(lvs) != len(desc.variableLabels.names) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs)) } result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)} result.init(result) // Init self-collection. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 8d818afe90d..b5c8bcb395a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -25,6 +25,7 @@ import ( dto "github.com/prometheus/client_model/go" "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" ) // nativeHistogramBounds for the frac of observed values. Only relevant for @@ -391,7 +392,7 @@ type HistogramOpts struct { // zero, it is replaced by default buckets. The default buckets are // DefBuckets if no buckets for a native histogram (see below) are used, // otherwise the default is no buckets. (In other words, if you want to - // use both reguler buckets and buckets for a native histogram, you have + // use both regular buckets and buckets for a native histogram, you have // to define the regular buckets here explicitly.) Buckets []float64 @@ -413,8 +414,8 @@ type HistogramOpts struct { // and 2, same as between 2 and 4, and 4 and 8, etc.). // // Details about the actually used factor: The factor is calculated as - // 2^(2^n), where n is an integer number between (and including) -8 and - // 4. n is chosen so that the resulting factor is the largest that is + // 2^(2^-n), where n is an integer number between (and including) -4 and + // 8. n is chosen so that the resulting factor is the largest that is // still smaller or equal to NativeHistogramBucketFactor. Note that the // smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8) // ). If NativeHistogramBucketFactor is greater than 1 but smaller than @@ -428,12 +429,12 @@ type HistogramOpts struct { // a major version bump. NativeHistogramBucketFactor float64 // All observations with an absolute value of less or equal - // NativeHistogramZeroThreshold are accumulated into a “zero” - // bucket. For best results, this should be close to a bucket - // boundary. This is usually the case if picking a power of two. If + // NativeHistogramZeroThreshold are accumulated into a “zero” bucket. + // For best results, this should be close to a bucket boundary. This is + // usually the case if picking a power of two. If // NativeHistogramZeroThreshold is left at zero, - // DefNativeHistogramZeroThreshold is used as the threshold. To configure - // a zero bucket with an actual threshold of zero (i.e. only + // DefNativeHistogramZeroThreshold is used as the threshold. 
To + // configure a zero bucket with an actual threshold of zero (i.e. only // observations of precisely zero will go into the zero bucket), set // NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero // constant (or any negative float value). @@ -446,26 +447,37 @@ type HistogramOpts struct { // Histogram are sufficiently wide-spread. In particular, this could be // used as a DoS attack vector. Where the observed values depend on // external inputs, it is highly recommended to set a - // NativeHistogramMaxBucketNumber.) Once the set + // NativeHistogramMaxBucketNumber.) Once the set // NativeHistogramMaxBucketNumber is exceeded, the following strategy is - // enacted: First, if the last reset (or the creation) of the histogram - // is at least NativeHistogramMinResetDuration ago, then the whole - // histogram is reset to its initial state (including regular - // buckets). If less time has passed, or if - // NativeHistogramMinResetDuration is zero, no reset is - // performed. Instead, the zero threshold is increased sufficiently to - // reduce the number of buckets to or below - // NativeHistogramMaxBucketNumber, but not to more than - // NativeHistogramMaxZeroThreshold. Thus, if - // NativeHistogramMaxZeroThreshold is already at or below the current - // zero threshold, nothing happens at this step. After that, if the - // number of buckets still exceeds NativeHistogramMaxBucketNumber, the - // resolution of the histogram is reduced by doubling the width of the - // sparse buckets (up to a growth factor between one bucket to the next - // of 2^(2^4) = 65536, see above). + // enacted: + // - First, if the last reset (or the creation) of the histogram is at + // least NativeHistogramMinResetDuration ago, then the whole + // histogram is reset to its initial state (including regular + // buckets). + // - If less time has passed, or if NativeHistogramMinResetDuration is + // zero, no reset is performed. Instead, the zero threshold is + // increased sufficiently to reduce the number of buckets to or below + // NativeHistogramMaxBucketNumber, but not to more than + // NativeHistogramMaxZeroThreshold. Thus, if + // NativeHistogramMaxZeroThreshold is already at or below the current + // zero threshold, nothing happens at this step. + // - After that, if the number of buckets still exceeds + // NativeHistogramMaxBucketNumber, the resolution of the histogram is + // reduced by doubling the width of the sparse buckets (up to a + // growth factor between one bucket to the next of 2^(2^4) = 65536, + // see above). + // - Any increased zero threshold or reduced resolution is reset back + // to their original values once NativeHistogramMinResetDuration has + // passed (since the last reset or the creation of the histogram). NativeHistogramMaxBucketNumber uint32 NativeHistogramMinResetDuration time.Duration NativeHistogramMaxZeroThreshold float64 + + // now is for testing purposes, by default it's time.Now. + now func() time.Time + + // afterFunc is for testing purposes, by default it's time.AfterFunc. + afterFunc func(time.Duration, func()) *time.Timer } // HistogramVecOpts bundles the options to create a HistogramVec metric. @@ -475,7 +487,7 @@ type HistogramVecOpts struct { HistogramOpts // VariableLabels are used to partition the metric vector by the given set - // of labels. Each label value will be constrained with the optional Contraint + // of labels. Each label value will be constrained with the optional Constraint // function, if provided. 
VariableLabels ConstrainableLabels } @@ -499,12 +511,12 @@ func NewHistogram(opts HistogramOpts) Histogram { } func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { - if len(desc.variableLabels) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues)) + if len(desc.variableLabels.names) != len(labelValues) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues)) } - for _, n := range desc.variableLabels { - if n.Name == bucketLabel { + for _, n := range desc.variableLabels.names { + if n == bucketLabel { panic(errBucketLabelNotAllowed) } } @@ -514,6 +526,12 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } } + if opts.now == nil { + opts.now = time.Now + } + if opts.afterFunc == nil { + opts.afterFunc = time.AfterFunc + } h := &histogram{ desc: desc, upperBounds: opts.Buckets, @@ -521,8 +539,9 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr nativeHistogramMaxBuckets: opts.NativeHistogramMaxBucketNumber, nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold, nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration, - lastResetTime: time.Now(), - now: time.Now, + lastResetTime: opts.now(), + now: opts.now, + afterFunc: opts.afterFunc, } if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 { h.upperBounds = DefBuckets @@ -701,9 +720,18 @@ type histogram struct { nativeHistogramMaxZeroThreshold float64 nativeHistogramMaxBuckets uint32 nativeHistogramMinResetDuration time.Duration - lastResetTime time.Time // Protected by mtx. - - now func() time.Time // To mock out time.Now() for testing. + // lastResetTime is protected by mtx. It is also used as created timestamp. + lastResetTime time.Time + // resetScheduled is protected by mtx. It is true if a reset is + // scheduled for a later time (when nativeHistogramMinResetDuration has + // passed). + resetScheduled bool + + // now is for testing purposes, by default it's time.Now. + now func() time.Time + + // afterFunc is for testing purposes, by default it's time.AfterFunc. + afterFunc func(time.Duration, func()) *time.Timer } func (h *histogram) Desc() *Desc { @@ -742,9 +770,10 @@ func (h *histogram) Write(out *dto.Metric) error { waitForCooldown(count, coldCounts) his := &dto.Histogram{ - Bucket: make([]*dto.Bucket, len(h.upperBounds)), - SampleCount: proto.Uint64(count), - SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + Bucket: make([]*dto.Bucket, len(h.upperBounds)), + SampleCount: proto.Uint64(count), + SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + CreatedTimestamp: timestamppb.New(h.lastResetTime), } out.Histogram = his out.Label = h.labelPairs @@ -782,6 +811,16 @@ func (h *histogram) Write(out *dto.Metric) error { his.ZeroCount = proto.Uint64(zeroBucket) his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative) his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive) + + // Add a no-op span to a histogram without observations and with + // a zero threshold of zero. Otherwise, a native histogram would + // look like a classic histogram to scrapers. 
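As a worked illustration of the options documented above (metric name and thresholds invented): NativeHistogramBucketFactor is rounded down to the nearest factor of the form 2^(2^-n), so a requested 1.1 yields n = 3 and an actual growth factor of 2^(1/8) ≈ 1.0905 between neighbouring buckets, while the bucket-count and reset knobs bound memory use.

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	h := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "demo_request_duration_seconds",
		Help: "Illustrative native histogram.",
		// Largest factor of the form 2^(2^-n) not exceeding 1.1 is 2^(1/8).
		NativeHistogramBucketFactor: 1.1,
		// Cap the sparse bucket count and allow a full reset once per hour.
		NativeHistogramMaxBucketNumber:  100,
		NativeHistogramMinResetDuration: time.Hour,
	})
	h.Observe(0.42)
}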
+ if *his.ZeroThreshold == 0 && *his.ZeroCount == 0 && len(his.PositiveSpan) == 0 && len(his.NegativeSpan) == 0 { + his.PositiveSpan = []*dto.BucketSpan{{ + Offset: proto.Int32(0), + Length: proto.Uint32(0), + }} + } } addAndResetCounts(hotCounts, coldCounts) return nil @@ -848,26 +887,39 @@ func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) { return } + // One of the other strategies will happen. To undo what they will do as + // soon as enough time has passed to satisfy + // h.nativeHistogramMinResetDuration, schedule a reset at the right time + // if we haven't done so already. + if h.nativeHistogramMinResetDuration > 0 && !h.resetScheduled { + h.resetScheduled = true + h.afterFunc(h.nativeHistogramMinResetDuration-h.now().Sub(h.lastResetTime), h.reset) + } + if h.maybeWidenZeroBucket(hotCounts, coldCounts) { return } h.doubleBucketWidth(hotCounts, coldCounts) } -// maybeReset resests the whole histogram if at least h.nativeHistogramMinResetDuration -// has been passed. It returns true if the histogram has been reset. The caller -// must have locked h.mtx. -func (h *histogram) maybeReset(hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int) bool { +// maybeReset resets the whole histogram if at least +// h.nativeHistogramMinResetDuration has been passed. It returns true if the +// histogram has been reset. The caller must have locked h.mtx. +func (h *histogram) maybeReset( + hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int, +) bool { // We are using the possibly mocked h.now() rather than // time.Since(h.lastResetTime) to enable testing. - if h.nativeHistogramMinResetDuration == 0 || h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration { + if h.nativeHistogramMinResetDuration == 0 || // No reset configured. + h.resetScheduled || // Do not interefere if a reset is already scheduled. + h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration { return false } // Completely reset coldCounts. h.resetCounts(cold) // Repeat the latest observation to not lose it completely. cold.observe(value, bucket, true) - // Make coldCounts the new hot counts while ressetting countAndHotIdx. + // Make coldCounts the new hot counts while resetting countAndHotIdx. n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1) count := n & ((1 << 63) - 1) waitForCooldown(count, hot) @@ -877,6 +929,29 @@ func (h *histogram) maybeReset(hot, cold *histogramCounts, coldIdx uint64, value return true } +// reset resets the whole histogram. It locks h.mtx itself, i.e. it has to be +// called without having locked h.mtx. +func (h *histogram) reset() { + h.mtx.Lock() + defer h.mtx.Unlock() + + n := atomic.LoadUint64(&h.countAndHotIdx) + hotIdx := n >> 63 + coldIdx := (^n) >> 63 + hot := h.counts[hotIdx] + cold := h.counts[coldIdx] + // Completely reset coldCounts. + h.resetCounts(cold) + // Make coldCounts the new hot counts while resetting countAndHotIdx. + n = atomic.SwapUint64(&h.countAndHotIdx, coldIdx<<63) + count := n & ((1 << 63) - 1) + waitForCooldown(count, hot) + // Finally, reset the formerly hot counts, too. 
+ h.resetCounts(hot) + h.lastResetTime = h.now() + h.resetScheduled = false +} + // maybeWidenZeroBucket widens the zero bucket until it includes the existing // buckets closest to the zero bucket (which could be two, if an equidistant // negative and a positive bucket exists, but usually it's only one bucket to be @@ -1176,6 +1251,7 @@ type constHistogram struct { sum float64 buckets map[float64]uint64 labelPairs []*dto.LabelPair + createdTs *timestamppb.Timestamp } func (h *constHistogram) Desc() *Desc { @@ -1183,7 +1259,9 @@ func (h *constHistogram) Desc() *Desc { } func (h *constHistogram) Write(out *dto.Metric) error { - his := &dto.Histogram{} + his := &dto.Histogram{ + CreatedTimestamp: h.createdTs, + } buckets := make([]*dto.Bucket, 0, len(h.buckets)) @@ -1230,7 +1308,7 @@ func NewConstHistogram( if desc.err != nil { return nil, desc.err } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { return nil, err } return &constHistogram{ @@ -1324,7 +1402,7 @@ func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) { // Multiple spans with only small gaps in between are probably // encoded more efficiently as one larger span with a few empty // buckets. Needs some research to find the sweet spot. For now, - // we assume that gaps of one ore two buckets should not create + // we assume that gaps of one or two buckets should not create // a new span. iDelta := int32(i - nextI) if n == 0 || iDelta > 2 { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go index fd0750f2cf5..a595a203625 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go @@ -14,7 +14,7 @@ // It provides tools to compare sequences of strings and generate textual diffs. // // Maintaining `GetUnifiedDiffString` here because original repository -// (https://github.com/pmezard/go-difflib) is no loger maintained. +// (https://github.com/pmezard/go-difflib) is no longer maintained. package internal import ( diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go index 63ff8683ce5..c21911f292d 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -32,19 +32,15 @@ import ( // create a Desc. type Labels map[string]string +// LabelConstraint normalizes label values. +type LabelConstraint func(string) string + // ConstrainedLabels represents a label name and its constrain function // to normalize label values. This type is commonly used when constructing // metric vector Collectors. type ConstrainedLabel struct { Name string - Constraint func(string) string -} - -func (cl ConstrainedLabel) Constrain(v string) string { - if cl.Constraint == nil { - return v - } - return cl.Constraint(v) + Constraint LabelConstraint } // ConstrainableLabels is an interface that allows creating of labels that can @@ -58,7 +54,7 @@ func (cl ConstrainedLabel) Constrain(v string) string { // }, // }) type ConstrainableLabels interface { - constrainedLabels() ConstrainedLabels + compile() *compiledLabels labelNames() []string } @@ -67,8 +63,20 @@ type ConstrainableLabels interface { // metric vector Collectors. 
type ConstrainedLabels []ConstrainedLabel -func (cls ConstrainedLabels) constrainedLabels() ConstrainedLabels { - return cls +func (cls ConstrainedLabels) compile() *compiledLabels { + compiled := &compiledLabels{ + names: make([]string, len(cls)), + labelConstraints: map[string]LabelConstraint{}, + } + + for i, label := range cls { + compiled.names[i] = label.Name + if label.Constraint != nil { + compiled.labelConstraints[label.Name] = label.Constraint + } + } + + return compiled } func (cls ConstrainedLabels) labelNames() []string { @@ -92,18 +100,36 @@ func (cls ConstrainedLabels) labelNames() []string { // } type UnconstrainedLabels []string -func (uls UnconstrainedLabels) constrainedLabels() ConstrainedLabels { - constrainedLabels := make([]ConstrainedLabel, len(uls)) - for i, l := range uls { - constrainedLabels[i] = ConstrainedLabel{Name: l} +func (uls UnconstrainedLabels) compile() *compiledLabels { + return &compiledLabels{ + names: uls, } - return constrainedLabels } func (uls UnconstrainedLabels) labelNames() []string { return uls } +type compiledLabels struct { + names []string + labelConstraints map[string]LabelConstraint +} + +func (cls *compiledLabels) compile() *compiledLabels { + return cls +} + +func (cls *compiledLabels) labelNames() []string { + return cls.names +} + +func (cls *compiledLabels) constrain(labelName, value string) string { + if fn, ok := cls.labelConstraints[labelName]; ok && fn != nil { + return fn(value) + } + return value +} + // reservedLabelPrefix is a prefix which is not legal in user-supplied // label names. const reservedLabelPrefix = "__" @@ -139,6 +165,8 @@ func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { func validateLabelValues(vals []string, expectedNumberOfValues int) error { if len(vals) != expectedNumberOfValues { + // The call below makes vals escape, copy them to avoid that. + vals := append([]string(nil), vals...) return fmt.Errorf( "%w: expected %d label values but got %d in %#v", errInconsistentCardinality, expectedNumberOfValues, diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index 07bbc9d7687..f018e57237d 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -92,6 +92,9 @@ type Opts struct { // machine_role metric). See also // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels ConstLabels Labels + + // now is for testing purposes, by default it's time.Now. + now func() time.Time } // BuildFQName joins the given three name components by "_". Empty name diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go index c0152cdb613..8c1136ceea3 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
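The compiled label handling above backs the constrained-label feature: a LabelConstraint normalizes a label value before it is hashed or curried. A sketch of the caller side, assuming the experimental constructors with the (v2) receivers shown here are reachable as prometheus.V2 as in upstream client_golang; metric and label names are invented:

package main

import (
	"strings"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// A LabelConstraint normalizes label values before they are used; here it
	// lower-cases the HTTP method to bound cardinality.
	lowerMethod := prometheus.LabelConstraint(strings.ToLower)

	hits := prometheus.V2.NewCounterVec(prometheus.CounterVecOpts{
		CounterOpts: prometheus.CounterOpts{
			Name: "demo_hits_total",
			Help: "Illustrative counter with a constrained label.",
		},
		VariableLabels: prometheus.ConstrainedLabels{
			{Name: "method", Constraint: lowerMethod},
		},
	})

	// "GET" and "get" are folded into the same child by the constraint.
	hits.WithLabelValues("GET").Inc()
	hits.WithLabelValues("get").Inc()
}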
-//go:build !windows && !js -// +build !windows,!js +//go:build !windows && !js && !wasip1 +// +build !windows,!js,!wasip1 package prometheus diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go new file mode 100644 index 00000000000..d8d9a6d7a2f --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go @@ -0,0 +1,26 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build wasip1 +// +build wasip1 + +package prometheus + +func canCollectProcess() bool { + return false +} + +func (*processCollector) processCollect(chan<- Metric) { + // noop on this platform + return +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go index 3793036ad09..356edb7868c 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -389,15 +389,12 @@ func isLabelCurried(c prometheus.Collector, label string) bool { return true } -// emptyLabels is a one-time allocation for non-partitioned metrics to avoid -// unnecessary allocations on each request. -var emptyLabels = prometheus.Labels{} - func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels { + labels := prometheus.Labels{} + if !(code || method) { - return emptyLabels + return labels } - labels := prometheus.Labels{} if code { labels["code"] = sanitizeCode(status) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 44da9433bee..5e2ced25a02 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -548,7 +548,7 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { goroutineBudget-- runtime.Gosched() } - // Once both checkedMetricChan and uncheckdMetricChan are closed + // Once both checkedMetricChan and uncheckedMetricChan are closed // and drained, the contraption above will nil out cmc and umc, // and then we can leave the collect loop here. if cmc == nil && umc == nil { @@ -963,9 +963,9 @@ func checkDescConsistency( // Is the desc consistent with the content of the metric? 
lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label)) copy(lpsFromDesc, desc.constLabelPairs) - for _, l := range desc.variableLabels { + for _, l := range desc.variableLabels.names { lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ - Name: proto.String(l.Name), + Name: proto.String(l), }) } if len(lpsFromDesc) != len(dtoMetric.Label) { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index dd359264e59..1462704446c 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -26,6 +26,7 @@ import ( "github.com/beorn7/perks/quantile" "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" ) // quantileLabel is used for the label that defines the quantile in a @@ -145,6 +146,9 @@ type SummaryOpts struct { // is the internal buffer size of the underlying package // "github.com/bmizerany/perks/quantile"). BufCap uint32 + + // now is for testing purposes, by default it's time.Now. + now func() time.Time } // SummaryVecOpts bundles the options to create a SummaryVec metric. @@ -154,7 +158,7 @@ type SummaryVecOpts struct { SummaryOpts // VariableLabels are used to partition the metric vector by the given set - // of labels. Each label value will be constrained with the optional Contraint + // of labels. Each label value will be constrained with the optional Constraint // function, if provided. VariableLabels ConstrainableLabels } @@ -188,12 +192,12 @@ func NewSummary(opts SummaryOpts) Summary { } func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { - if len(desc.variableLabels) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues)) + if len(desc.variableLabels.names) != len(labelValues) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues)) } - for _, n := range desc.variableLabels { - if n.Name == quantileLabel { + for _, n := range desc.variableLabels.names { + if n == quantileLabel { panic(errQuantileLabelNotAllowed) } } @@ -222,6 +226,9 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { opts.BufCap = DefBufCap } + if opts.now == nil { + opts.now = time.Now + } if len(opts.Objectives) == 0 { // Use the lock-free implementation of a Summary without objectives. s := &noObjectivesSummary{ @@ -230,6 +237,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { counts: [2]*summaryCounts{{}, {}}, } s.init(s) // Init self-collection. + s.createdTs = timestamppb.New(opts.now()) return s } @@ -245,7 +253,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { coldBuf: make([]float64, 0, opts.BufCap), streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets), } - s.headStreamExpTime = time.Now().Add(s.streamDuration) + s.headStreamExpTime = opts.now().Add(s.streamDuration) s.hotBufExpTime = s.headStreamExpTime for i := uint32(0); i < opts.AgeBuckets; i++ { @@ -259,6 +267,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { sort.Float64s(s.sortedObjectives) s.init(s) // Init self-collection. 
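Summaries receive the same treatment as counters above: both the objectives-based and the lock-free implementation record opts.now() at construction and report it as the created timestamp. A short sketch via a registry Gather; the metric name and objectives are invented:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()

	s := prometheus.NewSummary(prometheus.SummaryOpts{
		Name:       "demo_latency_seconds",
		Help:       "Illustrative summary.",
		Objectives: map[float64]float64{0.5: 0.05, 0.99: 0.001},
	})
	reg.MustRegister(s)
	s.Observe(0.3)

	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	// dto.Summary now carries the created timestamp alongside count and sum.
	fmt.Println(mfs[0].GetMetric()[0].GetSummary().GetCreatedTimestamp().AsTime())
}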
+ s.createdTs = timestamppb.New(opts.now()) return s } @@ -286,6 +295,8 @@ type summary struct { headStream *quantile.Stream headStreamIdx int headStreamExpTime, hotBufExpTime time.Time + + createdTs *timestamppb.Timestamp } func (s *summary) Desc() *Desc { @@ -307,7 +318,9 @@ func (s *summary) Observe(v float64) { } func (s *summary) Write(out *dto.Metric) error { - sum := &dto.Summary{} + sum := &dto.Summary{ + CreatedTimestamp: s.createdTs, + } qs := make([]*dto.Quantile, 0, len(s.objectives)) s.bufMtx.Lock() @@ -440,6 +453,8 @@ type noObjectivesSummary struct { counts [2]*summaryCounts labelPairs []*dto.LabelPair + + createdTs *timestamppb.Timestamp } func (s *noObjectivesSummary) Desc() *Desc { @@ -490,8 +505,9 @@ func (s *noObjectivesSummary) Write(out *dto.Metric) error { } sum := &dto.Summary{ - SampleCount: proto.Uint64(count), - SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + SampleCount: proto.Uint64(count), + SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + CreatedTimestamp: s.createdTs, } out.Summary = sum @@ -681,6 +697,7 @@ type constSummary struct { sum float64 quantiles map[float64]float64 labelPairs []*dto.LabelPair + createdTs *timestamppb.Timestamp } func (s *constSummary) Desc() *Desc { @@ -688,7 +705,9 @@ func (s *constSummary) Desc() *Desc { } func (s *constSummary) Write(out *dto.Metric) error { - sum := &dto.Summary{} + sum := &dto.Summary{ + CreatedTimestamp: s.createdTs, + } qs := make([]*dto.Quantile, 0, len(s.quantiles)) sum.SampleCount = proto.Uint64(s.count) @@ -737,7 +756,7 @@ func NewConstSummary( if desc.err != nil { return nil, desc.err } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { return nil, err } return &constSummary{ diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/problem.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/problem.go new file mode 100644 index 00000000000..9ba42826adb --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/problem.go @@ -0,0 +1,33 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promlint + +import dto "github.com/prometheus/client_model/go" + +// A Problem is an issue detected by a linter. +type Problem struct { + // The name of the metric indicated by this Problem. + Metric string + + // A description of the issue for this Problem. + Text string +} + +// newProblem is helper function to create a Problem. 
+func newProblem(mf *dto.MetricFamily, text string) Problem { + return Problem{ + Metric: mf.GetName(), + Text: text, + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go index c8864b6c3f5..dd29cccc30c 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go @@ -16,15 +16,11 @@ package promlint import ( "errors" - "fmt" "io" - "regexp" "sort" - "strings" - - "github.com/prometheus/common/expfmt" dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" ) // A Linter is a Prometheus metrics linter. It identifies issues with metric @@ -37,23 +33,8 @@ type Linter struct { // of them. r io.Reader mfs []*dto.MetricFamily -} -// A Problem is an issue detected by a Linter. -type Problem struct { - // The name of the metric indicated by this Problem. - Metric string - - // A description of the issue for this Problem. - Text string -} - -// newProblem is helper function to create a Problem. -func newProblem(mf *dto.MetricFamily, text string) Problem { - return Problem{ - Metric: mf.GetName(), - Text: text, - } + customValidations []Validation } // New creates a new Linter that reads an input stream of Prometheus metrics in @@ -72,6 +53,14 @@ func NewWithMetricFamilies(mfs []*dto.MetricFamily) *Linter { } } +// AddCustomValidations adds custom validations to the linter. +func (l *Linter) AddCustomValidations(vs ...Validation) { + if l.customValidations == nil { + l.customValidations = make([]Validation, 0, len(vs)) + } + l.customValidations = append(l.customValidations, vs...) +} + // Lint performs a linting pass, returning a slice of Problems indicating any // issues found in the metrics stream. The slice is sorted by metric name // and issue description. @@ -91,11 +80,11 @@ func (l *Linter) Lint() ([]Problem, error) { return nil, err } - problems = append(problems, lint(mf)...) + problems = append(problems, l.lint(mf)...) } } for _, mf := range l.mfs { - problems = append(problems, lint(mf)...) + problems = append(problems, l.lint(mf)...) } // Ensure deterministic output. @@ -110,276 +99,25 @@ func (l *Linter) Lint() ([]Problem, error) { } // lint is the entry point for linting a single metric. -func lint(mf *dto.MetricFamily) []Problem { - fns := []func(mf *dto.MetricFamily) []Problem{ - lintHelp, - lintMetricUnits, - lintCounter, - lintHistogramSummaryReserved, - lintMetricTypeInName, - lintReservedChars, - lintCamelCase, - lintUnitAbbreviations, - } - - var problems []Problem - for _, fn := range fns { - problems = append(problems, fn(mf)...) - } - - // TODO(mdlayher): lint rules for specific metrics types. - return problems -} - -// lintHelp detects issues related to the help text for a metric. -func lintHelp(mf *dto.MetricFamily) []Problem { +func (l *Linter) lint(mf *dto.MetricFamily) []Problem { var problems []Problem - // Expect all metrics to have help text available. - if mf.Help == nil { - problems = append(problems, newProblem(mf, "no help text")) - } - - return problems -} - -// lintMetricUnits detects issues with metric unit names. -func lintMetricUnits(mf *dto.MetricFamily) []Problem { - var problems []Problem - - unit, base, ok := metricUnits(*mf.Name) - if !ok { - // No known units detected. - return nil - } - - // Unit is already a base unit. 
- if unit == base { - return nil - } - - problems = append(problems, newProblem(mf, fmt.Sprintf("use base unit %q instead of %q", base, unit))) - - return problems -} - -// lintCounter detects issues specific to counters, as well as patterns that should -// only be used with counters. -func lintCounter(mf *dto.MetricFamily) []Problem { - var problems []Problem - - isCounter := mf.GetType() == dto.MetricType_COUNTER - isUntyped := mf.GetType() == dto.MetricType_UNTYPED - hasTotalSuffix := strings.HasSuffix(mf.GetName(), "_total") - - switch { - case isCounter && !hasTotalSuffix: - problems = append(problems, newProblem(mf, `counter metrics should have "_total" suffix`)) - case !isUntyped && !isCounter && hasTotalSuffix: - problems = append(problems, newProblem(mf, `non-counter metrics should not have "_total" suffix`)) - } - - return problems -} - -// lintHistogramSummaryReserved detects when other types of metrics use names or labels -// reserved for use by histograms and/or summaries. -func lintHistogramSummaryReserved(mf *dto.MetricFamily) []Problem { - // These rules do not apply to untyped metrics. - t := mf.GetType() - if t == dto.MetricType_UNTYPED { - return nil - } - - var problems []Problem - - isHistogram := t == dto.MetricType_HISTOGRAM - isSummary := t == dto.MetricType_SUMMARY - - n := mf.GetName() - - if !isHistogram && strings.HasSuffix(n, "_bucket") { - problems = append(problems, newProblem(mf, `non-histogram metrics should not have "_bucket" suffix`)) - } - if !isHistogram && !isSummary && strings.HasSuffix(n, "_count") { - problems = append(problems, newProblem(mf, `non-histogram and non-summary metrics should not have "_count" suffix`)) - } - if !isHistogram && !isSummary && strings.HasSuffix(n, "_sum") { - problems = append(problems, newProblem(mf, `non-histogram and non-summary metrics should not have "_sum" suffix`)) - } - - for _, m := range mf.GetMetric() { - for _, l := range m.GetLabel() { - ln := l.GetName() - - if !isHistogram && ln == "le" { - problems = append(problems, newProblem(mf, `non-histogram metrics should not have "le" label`)) - } - if !isSummary && ln == "quantile" { - problems = append(problems, newProblem(mf, `non-summary metrics should not have "quantile" label`)) - } - } - } - - return problems -} - -// lintMetricTypeInName detects when metric types are included in the metric name. -func lintMetricTypeInName(mf *dto.MetricFamily) []Problem { - var problems []Problem - n := strings.ToLower(mf.GetName()) - - for i, t := range dto.MetricType_name { - if i == int32(dto.MetricType_UNTYPED) { - continue + for _, fn := range defaultValidations { + errs := fn(mf) + for _, err := range errs { + problems = append(problems, newProblem(mf, err.Error())) } - - typename := strings.ToLower(t) - if strings.Contains(n, "_"+typename+"_") || strings.HasSuffix(n, "_"+typename) { - problems = append(problems, newProblem(mf, fmt.Sprintf(`metric name should not include type '%s'`, typename))) - } - } - return problems -} - -// lintReservedChars detects colons in metric names. -func lintReservedChars(mf *dto.MetricFamily) []Problem { - var problems []Problem - if strings.Contains(mf.GetName(), ":") { - problems = append(problems, newProblem(mf, "metric names should not contain ':'")) - } - return problems -} - -var camelCase = regexp.MustCompile(`[a-z][A-Z]`) - -// lintCamelCase detects metric names and label names written in camelCase. 
-func lintCamelCase(mf *dto.MetricFamily) []Problem { - var problems []Problem - if camelCase.FindString(mf.GetName()) != "" { - problems = append(problems, newProblem(mf, "metric names should be written in 'snake_case' not 'camelCase'")) } - for _, m := range mf.GetMetric() { - for _, l := range m.GetLabel() { - if camelCase.FindString(l.GetName()) != "" { - problems = append(problems, newProblem(mf, "label names should be written in 'snake_case' not 'camelCase'")) + if l.customValidations != nil { + for _, fn := range l.customValidations { + errs := fn(mf) + for _, err := range errs { + problems = append(problems, newProblem(mf, err.Error())) } } } - return problems -} -// lintUnitAbbreviations detects abbreviated units in the metric name. -func lintUnitAbbreviations(mf *dto.MetricFamily) []Problem { - var problems []Problem - n := strings.ToLower(mf.GetName()) - for _, s := range unitAbbreviations { - if strings.Contains(n, "_"+s+"_") || strings.HasSuffix(n, "_"+s) { - problems = append(problems, newProblem(mf, "metric names should not contain abbreviated units")) - } - } + // TODO(mdlayher): lint rules for specific metrics types. return problems } - -// metricUnits attempts to detect known unit types used as part of a metric name, -// e.g. "foo_bytes_total" or "bar_baz_milligrams". -func metricUnits(m string) (unit, base string, ok bool) { - ss := strings.Split(m, "_") - - for _, s := range ss { - if base, found := units[s]; found { - return s, base, true - } - - for _, p := range unitPrefixes { - if strings.HasPrefix(s, p) { - if base, found := units[s[len(p):]]; found { - return s, base, true - } - } - } - } - - return "", "", false -} - -// Units and their possible prefixes recognized by this library. More can be -// added over time as needed. -var ( - // map a unit to the appropriate base unit. - units = map[string]string{ - // Base units. - "amperes": "amperes", - "bytes": "bytes", - "celsius": "celsius", // Also allow Celsius because it is common in typical Prometheus use cases. - "grams": "grams", - "joules": "joules", - "kelvin": "kelvin", // SI base unit, used in special cases (e.g. color temperature, scientific measurements). - "meters": "meters", // Both American and international spelling permitted. - "metres": "metres", - "seconds": "seconds", - "volts": "volts", - - // Non base units. - // Time. - "minutes": "seconds", - "hours": "seconds", - "days": "seconds", - "weeks": "seconds", - // Temperature. - "kelvins": "kelvin", - "fahrenheit": "celsius", - "rankine": "celsius", - // Length. - "inches": "meters", - "yards": "meters", - "miles": "meters", - // Bytes. - "bits": "bytes", - // Energy. - "calories": "joules", - // Mass. - "pounds": "grams", - "ounces": "grams", - } - - unitPrefixes = []string{ - "pico", - "nano", - "micro", - "milli", - "centi", - "deci", - "deca", - "hecto", - "kilo", - "kibi", - "mega", - "mibi", - "giga", - "gibi", - "tera", - "tebi", - "peta", - "pebi", - } - - // Common abbreviations that we'd like to discourage. 
- unitAbbreviations = []string{ - "s", - "ms", - "us", - "ns", - "sec", - "b", - "kb", - "mb", - "gb", - "tb", - "pb", - "m", - "h", - "d", - } -) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go new file mode 100644 index 00000000000..f52ad9eab67 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go @@ -0,0 +1,33 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promlint + +import ( + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus/testutil/promlint/validations" +) + +type Validation = func(mf *dto.MetricFamily) []error + +var defaultValidations = []Validation{ + validations.LintHelp, + validations.LintMetricUnits, + validations.LintCounter, + validations.LintHistogramSummaryReserved, + validations.LintMetricTypeInName, + validations.LintReservedChars, + validations.LintCamelCase, + validations.LintUnitAbbreviations, +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/counter_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/counter_validations.go new file mode 100644 index 00000000000..f2c2c3905dd --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/counter_validations.go @@ -0,0 +1,40 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validations + +import ( + "errors" + "strings" + + dto "github.com/prometheus/client_model/go" +) + +// LintCounter detects issues specific to counters, as well as patterns that should +// only be used with counters. 
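The promlint refactor above turns each lint rule into an exported Validation (func(*dto.MetricFamily) []error) and lets callers register extra rules through AddCustomValidations. A sketch of a custom rule; the prefix requirement and all names are invented:

package main

import (
	"errors"
	"fmt"
	"strings"

	dto "github.com/prometheus/client_model/go"

	"github.com/prometheus/client_golang/prometheus/testutil/promlint"
)

// requirePrefix is an illustrative custom validation: it flags metric
// families whose name lacks an (assumed) team prefix.
func requirePrefix(mf *dto.MetricFamily) []error {
	if !strings.HasPrefix(mf.GetName(), "myteam_") {
		return []error{errors.New(`metric names should start with "myteam_"`)}
	}
	return nil
}

func main() {
	metrics := strings.NewReader(`# HELP http_requests_total Total HTTP requests.
# TYPE http_requests_total counter
http_requests_total 42
`)
	l := promlint.New(metrics)
	l.AddCustomValidations(requirePrefix)

	problems, err := l.Lint()
	if err != nil {
		panic(err)
	}
	for _, p := range problems {
		fmt.Printf("%s: %s\n", p.Metric, p.Text)
	}
}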
+func LintCounter(mf *dto.MetricFamily) []error { + var problems []error + + isCounter := mf.GetType() == dto.MetricType_COUNTER + isUntyped := mf.GetType() == dto.MetricType_UNTYPED + hasTotalSuffix := strings.HasSuffix(mf.GetName(), "_total") + + switch { + case isCounter && !hasTotalSuffix: + problems = append(problems, errors.New(`counter metrics should have "_total" suffix`)) + case !isUntyped && !isCounter && hasTotalSuffix: + problems = append(problems, errors.New(`non-counter metrics should not have "_total" suffix`)) + } + + return problems +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go new file mode 100644 index 00000000000..bc8dbd1e16b --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go @@ -0,0 +1,101 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validations + +import ( + "errors" + "fmt" + "regexp" + "strings" + + dto "github.com/prometheus/client_model/go" +) + +var camelCase = regexp.MustCompile(`[a-z][A-Z]`) + +// LintMetricUnits detects issues with metric unit names. +func LintMetricUnits(mf *dto.MetricFamily) []error { + var problems []error + + unit, base, ok := metricUnits(*mf.Name) + if !ok { + // No known units detected. + return nil + } + + // Unit is already a base unit. + if unit == base { + return nil + } + + problems = append(problems, fmt.Errorf("use base unit %q instead of %q", base, unit)) + + return problems +} + +// LintMetricTypeInName detects when metric types are included in the metric name. +func LintMetricTypeInName(mf *dto.MetricFamily) []error { + var problems []error + n := strings.ToLower(mf.GetName()) + + for i, t := range dto.MetricType_name { + if i == int32(dto.MetricType_UNTYPED) { + continue + } + + typename := strings.ToLower(t) + if strings.Contains(n, "_"+typename+"_") || strings.HasSuffix(n, "_"+typename) { + problems = append(problems, fmt.Errorf(`metric name should not include type '%s'`, typename)) + } + } + return problems +} + +// LintReservedChars detects colons in metric names. +func LintReservedChars(mf *dto.MetricFamily) []error { + var problems []error + if strings.Contains(mf.GetName(), ":") { + problems = append(problems, errors.New("metric names should not contain ':'")) + } + return problems +} + +// LintCamelCase detects metric names and label names written in camelCase. 
+func LintCamelCase(mf *dto.MetricFamily) []error { + var problems []error + if camelCase.FindString(mf.GetName()) != "" { + problems = append(problems, errors.New("metric names should be written in 'snake_case' not 'camelCase'")) + } + + for _, m := range mf.GetMetric() { + for _, l := range m.GetLabel() { + if camelCase.FindString(l.GetName()) != "" { + problems = append(problems, errors.New("label names should be written in 'snake_case' not 'camelCase'")) + } + } + } + return problems +} + +// LintUnitAbbreviations detects abbreviated units in the metric name. +func LintUnitAbbreviations(mf *dto.MetricFamily) []error { + var problems []error + n := strings.ToLower(mf.GetName()) + for _, s := range unitAbbreviations { + if strings.Contains(n, "_"+s+"_") || strings.HasSuffix(n, "_"+s) { + problems = append(problems, errors.New("metric names should not contain abbreviated units")) + } + } + return problems +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/help_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/help_validations.go new file mode 100644 index 00000000000..1df29446897 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/help_validations.go @@ -0,0 +1,32 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validations + +import ( + "errors" + + dto "github.com/prometheus/client_model/go" +) + +// LintHelp detects issues related to the help text for a metric. +func LintHelp(mf *dto.MetricFamily) []error { + var problems []error + + // Expect all metrics to have help text available. + if mf.Help == nil { + problems = append(problems, errors.New("no help text")) + } + + return problems +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/histogram_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/histogram_validations.go new file mode 100644 index 00000000000..6564bdf3668 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/histogram_validations.go @@ -0,0 +1,63 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package validations + +import ( + "errors" + "strings" + + dto "github.com/prometheus/client_model/go" +) + +// LintHistogramSummaryReserved detects when other types of metrics use names or labels +// reserved for use by histograms and/or summaries. +func LintHistogramSummaryReserved(mf *dto.MetricFamily) []error { + // These rules do not apply to untyped metrics. + t := mf.GetType() + if t == dto.MetricType_UNTYPED { + return nil + } + + var problems []error + + isHistogram := t == dto.MetricType_HISTOGRAM + isSummary := t == dto.MetricType_SUMMARY + + n := mf.GetName() + + if !isHistogram && strings.HasSuffix(n, "_bucket") { + problems = append(problems, errors.New(`non-histogram metrics should not have "_bucket" suffix`)) + } + if !isHistogram && !isSummary && strings.HasSuffix(n, "_count") { + problems = append(problems, errors.New(`non-histogram and non-summary metrics should not have "_count" suffix`)) + } + if !isHistogram && !isSummary && strings.HasSuffix(n, "_sum") { + problems = append(problems, errors.New(`non-histogram and non-summary metrics should not have "_sum" suffix`)) + } + + for _, m := range mf.GetMetric() { + for _, l := range m.GetLabel() { + ln := l.GetName() + + if !isHistogram && ln == "le" { + problems = append(problems, errors.New(`non-histogram metrics should not have "le" label`)) + } + if !isSummary && ln == "quantile" { + problems = append(problems, errors.New(`non-summary metrics should not have "quantile" label`)) + } + } + } + + return problems +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/units.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/units.go new file mode 100644 index 00000000000..967977d2b01 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/units.go @@ -0,0 +1,118 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validations + +import "strings" + +// Units and their possible prefixes recognized by this library. More can be +// added over time as needed. +var ( + // map a unit to the appropriate base unit. + units = map[string]string{ + // Base units. + "amperes": "amperes", + "bytes": "bytes", + "celsius": "celsius", // Also allow Celsius because it is common in typical Prometheus use cases. + "grams": "grams", + "joules": "joules", + "kelvin": "kelvin", // SI base unit, used in special cases (e.g. color temperature, scientific measurements). + "meters": "meters", // Both American and international spelling permitted. + "metres": "metres", + "seconds": "seconds", + "volts": "volts", + + // Non base units. + // Time. + "minutes": "seconds", + "hours": "seconds", + "days": "seconds", + "weeks": "seconds", + // Temperature. + "kelvins": "kelvin", + "fahrenheit": "celsius", + "rankine": "celsius", + // Length. + "inches": "meters", + "yards": "meters", + "miles": "meters", + // Bytes. + "bits": "bytes", + // Energy. 
+ "calories": "joules", + // Mass. + "pounds": "grams", + "ounces": "grams", + } + + unitPrefixes = []string{ + "pico", + "nano", + "micro", + "milli", + "centi", + "deci", + "deca", + "hecto", + "kilo", + "kibi", + "mega", + "mibi", + "giga", + "gibi", + "tera", + "tebi", + "peta", + "pebi", + } + + // Common abbreviations that we'd like to discourage. + unitAbbreviations = []string{ + "s", + "ms", + "us", + "ns", + "sec", + "b", + "kb", + "mb", + "gb", + "tb", + "pb", + "m", + "h", + "d", + } +) + +// metricUnits attempts to detect known unit types used as part of a metric name, +// e.g. "foo_bytes_total" or "bar_baz_milligrams". +func metricUnits(m string) (unit, base string, ok bool) { + ss := strings.Split(m, "_") + + for _, s := range ss { + if base, found := units[s]; found { + return s, base, true + } + + for _, p := range unitPrefixes { + if strings.HasPrefix(s, p) { + if base, found := units[s[len(p):]]; found { + return s, base, true + } + } + } + } + + return "", "", false +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go index 82d4a5436b6..269f56435b8 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go @@ -47,6 +47,7 @@ import ( "github.com/davecgh/go-spew/spew" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" + "google.golang.org/protobuf/proto" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/internal" @@ -230,6 +231,20 @@ func convertReaderToMetricFamily(reader io.Reader) ([]*dto.MetricFamily, error) return nil, fmt.Errorf("converting reader to metric families failed: %w", err) } + // The text protocol handles empty help fields inconsistently. When + // encoding, any non-nil value, include the empty string, produces a + // "# HELP" line. But when decoding, the help field is only set to a + // non-nil value if the "# HELP" line contains a non-empty value. + // + // Because metrics in a registry always have non-nil help fields, populate + // any nil help fields in the parsed metrics with the empty string so that + // when we compare text encodings, the results are consistent. 
+ for _, metric := range notNormalized { + if metric.Help == nil { + metric.Help = proto.String("") + } + } + return internal.NormalizeMetricFamilies(notNormalized), nil } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go index 5f6bb80014d..cc23011fad2 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/value.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -14,6 +14,7 @@ package prometheus import ( + "errors" "fmt" "sort" "time" @@ -91,7 +92,7 @@ func (v *valueFunc) Desc() *Desc { } func (v *valueFunc) Write(out *dto.Metric) error { - return populateMetric(v.valType, v.function(), v.labelPairs, nil, out) + return populateMetric(v.valType, v.function(), v.labelPairs, nil, out, nil) } // NewConstMetric returns a metric with one fixed value that cannot be @@ -105,12 +106,12 @@ func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues if desc.err != nil { return nil, desc.err } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { return nil, err } metric := &dto.Metric{} - if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric); err != nil { + if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, nil); err != nil { return nil, err } @@ -130,6 +131,43 @@ func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelVal return m } +// NewConstMetricWithCreatedTimestamp does the same thing as NewConstMetric, but generates Counters +// with created timestamp set and returns an error for other metric types. +func NewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + switch valueType { + case CounterValue: + break + default: + return nil, errors.New("created timestamps are only supported for counters") + } + + metric := &dto.Metric{} + if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, timestamppb.New(ct)); err != nil { + return nil, err + } + + return &constMetric{ + desc: desc, + metric: metric, + }, nil +} + +// MustNewConstMetricWithCreatedTimestamp is a version of NewConstMetricWithCreatedTimestamp that panics where +// NewConstMetricWithCreatedTimestamp would have returned an error. +func MustNewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) Metric { + m, err := NewConstMetricWithCreatedTimestamp(desc, valueType, value, ct, labelValues...) + if err != nil { + panic(err) + } + return m +} + type constMetric struct { desc *Desc metric *dto.Metric @@ -153,11 +191,12 @@ func populateMetric( labelPairs []*dto.LabelPair, e *dto.Exemplar, m *dto.Metric, + ct *timestamppb.Timestamp, ) error { m.Label = labelPairs switch t { case CounterValue: - m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e} + m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e, CreatedTimestamp: ct} case GaugeValue: m.Gauge = &dto.Gauge{Value: proto.Float64(v)} case UntypedValue: @@ -176,19 +215,19 @@ func populateMetric( // This function is only needed for custom Metric implementations. 
See MetricVec // example. func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { - totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) + totalLen := len(desc.variableLabels.names) + len(desc.constLabelPairs) if totalLen == 0 { // Super fast path. return nil } - if len(desc.variableLabels) == 0 { + if len(desc.variableLabels.names) == 0 { // Moderately fast path. return desc.constLabelPairs } labelPairs := make([]*dto.LabelPair, 0, totalLen) - for i, l := range desc.variableLabels { + for i, l := range desc.variableLabels.names { labelPairs = append(labelPairs, &dto.LabelPair{ - Name: proto.String(l.Name), + Name: proto.String(l), Value: proto.String(labelValues[i]), }) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index f0d0015a0ff..955cfd59f83 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -20,24 +20,6 @@ import ( "github.com/prometheus/common/model" ) -var labelsPool = &sync.Pool{ - New: func() interface{} { - return make(Labels) - }, -} - -func getLabelsFromPool() Labels { - return labelsPool.Get().(Labels) -} - -func putLabelsToPool(labels Labels) { - for k := range labels { - delete(labels, k) - } - - labelsPool.Put(labels) -} - // MetricVec is a Collector to bundle metrics of the same name that differ in // their label values. MetricVec is not used directly but as a building block // for implementations of vectors of a given metric type, like GaugeVec, @@ -91,6 +73,7 @@ func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec { // See also the CounterVec example. func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { lvs = constrainLabelValues(m.desc, lvs, m.curry) + h, err := m.hashLabelValues(lvs) if err != nil { return false @@ -110,8 +93,8 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { // This method is used for the same purpose as DeleteLabelValues(...string). See // there for pros and cons of the two methods. func (m *MetricVec) Delete(labels Labels) bool { - labels = constrainLabels(m.desc, labels) - defer putLabelsToPool(labels) + labels, closer := constrainLabels(m.desc, labels) + defer closer() h, err := m.hashLabels(labels) if err != nil { @@ -128,8 +111,8 @@ func (m *MetricVec) Delete(labels Labels) bool { // Note that curried labels will never be matched if deleting from the curried vector. // To match curried labels with DeletePartialMatch, it must be called on the base vector. func (m *MetricVec) DeletePartialMatch(labels Labels) int { - labels = constrainLabels(m.desc, labels) - defer putLabelsToPool(labels) + labels, closer := constrainLabels(m.desc, labels) + defer closer() return m.metricMap.deleteByLabels(labels, m.curry) } @@ -169,11 +152,11 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { oldCurry = m.curry iCurry int ) - for i, label := range m.desc.variableLabels { - val, ok := labels[label.Name] + for i, labelName := range m.desc.variableLabels.names { + val, ok := labels[labelName] if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { if ok { - return nil, fmt.Errorf("label name %q is already curried", label.Name) + return nil, fmt.Errorf("label name %q is already curried", labelName) } newCurry = append(newCurry, oldCurry[iCurry]) iCurry++ @@ -181,7 +164,10 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { if !ok { continue // Label stays uncurried. 
} - newCurry = append(newCurry, curriedLabelValue{i, label.Constrain(val)}) + newCurry = append(newCurry, curriedLabelValue{ + i, + m.desc.variableLabels.constrain(labelName, val), + }) } } if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { @@ -250,8 +236,8 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { // around MetricVec, implementing a vector for a specific Metric implementation, // for example GaugeVec. func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { - labels = constrainLabels(m.desc, labels) - defer putLabelsToPool(labels) + labels, closer := constrainLabels(m.desc, labels) + defer closer() h, err := m.hashLabels(labels) if err != nil { @@ -262,7 +248,7 @@ func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { } func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { - if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil { + if err := validateLabelValues(vals, len(m.desc.variableLabels.names)-len(m.curry)); err != nil { return 0, err } @@ -271,7 +257,7 @@ func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { curry = m.curry iVals, iCurry int ) - for i := 0; i < len(m.desc.variableLabels); i++ { + for i := 0; i < len(m.desc.variableLabels.names); i++ { if iCurry < len(curry) && curry[iCurry].index == i { h = m.hashAdd(h, curry[iCurry].value) iCurry++ @@ -285,7 +271,7 @@ func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { } func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { - if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil { + if err := validateValuesInLabels(labels, len(m.desc.variableLabels.names)-len(m.curry)); err != nil { return 0, err } @@ -294,17 +280,17 @@ func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { curry = m.curry iCurry int ) - for i, label := range m.desc.variableLabels { - val, ok := labels[label.Name] + for i, labelName := range m.desc.variableLabels.names { + val, ok := labels[labelName] if iCurry < len(curry) && curry[iCurry].index == i { if ok { - return 0, fmt.Errorf("label name %q is already curried", label.Name) + return 0, fmt.Errorf("label name %q is already curried", labelName) } h = m.hashAdd(h, curry[iCurry].value) iCurry++ } else { if !ok { - return 0, fmt.Errorf("label name %q missing in label map", label.Name) + return 0, fmt.Errorf("label name %q missing in label map", labelName) } h = m.hashAdd(h, val) } @@ -482,7 +468,7 @@ func valueMatchesVariableOrCurriedValue(targetValue string, index int, values [] func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { for l, v := range labels { // Check if the target label exists in our metrics and get the index. - varLabelIndex, validLabel := indexOf(l, desc.variableLabels.labelNames()) + varLabelIndex, validLabel := indexOf(l, desc.variableLabels.names) if validLabel { // Check the value of that label against the target value. // We don't consider curried values in partial matches. 
@@ -626,7 +612,7 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe return false } iCurry := 0 - for i, k := range desc.variableLabels { + for i, k := range desc.variableLabels.names { if iCurry < len(curry) && curry[iCurry].index == i { if values[i] != curry[iCurry].value { return false @@ -634,7 +620,7 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe iCurry++ continue } - if values[i] != labels[k.Name] { + if values[i] != labels[k] { return false } } @@ -644,13 +630,13 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string { labelValues := make([]string, len(labels)+len(curry)) iCurry := 0 - for i, k := range desc.variableLabels { + for i, k := range desc.variableLabels.names { if iCurry < len(curry) && curry[iCurry].index == i { labelValues[i] = curry[iCurry].value iCurry++ continue } - labelValues[i] = labels[k.Name] + labelValues[i] = labels[k] } return labelValues } @@ -670,20 +656,37 @@ func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { return labelValues } -func constrainLabels(desc *Desc, labels Labels) Labels { - constrainedLabels := getLabelsFromPool() - for l, v := range labels { - if i, ok := indexOf(l, desc.variableLabels.labelNames()); ok { - v = desc.variableLabels[i].Constrain(v) - } +var labelsPool = &sync.Pool{ + New: func() interface{} { + return make(Labels) + }, +} - constrainedLabels[l] = v +func constrainLabels(desc *Desc, labels Labels) (Labels, func()) { + if len(desc.variableLabels.labelConstraints) == 0 { + // Fast path when there's no constraints + return labels, func() {} } - return constrainedLabels + constrainedLabels := labelsPool.Get().(Labels) + for l, v := range labels { + constrainedLabels[l] = desc.variableLabels.constrain(l, v) + } + + return constrainedLabels, func() { + for k := range constrainedLabels { + delete(constrainedLabels, k) + } + labelsPool.Put(constrainedLabels) + } } func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string { + if len(desc.variableLabels.labelConstraints) == 0 { + // Fast path when there's no constraints + return lvs + } + constrainedValues := make([]string, len(lvs)) var iCurry, iLVs int for i := 0; i < len(lvs)+len(curry); i++ { @@ -692,8 +695,11 @@ func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) [ continue } - if i < len(desc.variableLabels) { - constrainedValues[iLVs] = desc.variableLabels[i].Constrain(lvs[iLVs]) + if i < len(desc.variableLabels.names) { + constrainedValues[iLVs] = desc.variableLabels.constrain( + desc.variableLabels.names[i], + lvs[iLVs], + ) } else { constrainedValues[iLVs] = lvs[iLVs] } diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go index 2b5bca4b999..cee360db7f3 100644 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -215,8 +215,9 @@ type Counter struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` + CreatedTimestamp 
*timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"` } func (x *Counter) Reset() { @@ -265,6 +266,13 @@ func (x *Counter) GetExemplar() *Exemplar { return nil } +func (x *Counter) GetCreatedTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.CreatedTimestamp + } + return nil +} + type Quantile struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -325,9 +333,10 @@ type Summary struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` - Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` + CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"` } func (x *Summary) Reset() { @@ -383,6 +392,13 @@ func (x *Summary) GetQuantile() []*Quantile { return nil } +func (x *Summary) GetCreatedTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.CreatedTimestamp + } + return nil +} + type Untyped struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -439,7 +455,8 @@ type Histogram struct { SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` // Overrides sample_count if > 0. SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` // Buckets for the conventional histogram. - Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` // Ordered in increasing order of upper_bound, +Inf bucket is optional. + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` // Ordered in increasing order of upper_bound, +Inf bucket is optional. + CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,15,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"` // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8. // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and // then each power of two is divided into 2^n logarithmic buckets. @@ -457,6 +474,9 @@ type Histogram struct { NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` // Absolute count of each bucket. // Positive buckets for the native histogram. + // Use a no-op span (offset 0, length 0) for a native histogram without any + // observations yet and with a zero_threshold of 0. Otherwise, it would be + // indistinguishable from a classic histogram. 
PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"` // Use either "positive_delta" or "positive_count", the former for // regular histograms with integer counts, the latter for float @@ -525,6 +545,13 @@ func (x *Histogram) GetBucket() []*Bucket { return nil } +func (x *Histogram) GetCreatedTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.CreatedTimestamp + } + return nil +} + func (x *Histogram) GetSchema() int32 { if x != nil && x.Schema != nil { return *x.Schema @@ -972,137 +999,151 @@ var file_io_prometheus_client_metrics_proto_rawDesc = []byte{ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x1d, 0x0a, 0x05, 0x47, 0x61, 0x75, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x22, 0x5b, 0x0a, 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, - 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, - 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x22, 0x3c, - 0x0a, 0x08, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75, - 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75, - 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x87, 0x01, 0x0a, - 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, + 0x65, 0x22, 0xa4, 0x01, 0x0a, 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, + 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, + 0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x3c, 0x0a, 0x08, 0x51, 0x75, 0x61, 0x6e, + 0x74, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xd0, 0x01, 0x0a, 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61, + 0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 
0x5f, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, + 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c, + 0x65, 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, + 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x51, 0x75, + 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, + 0x12, 0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74, + 0x79, 0x70, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xac, 0x05, 0x0a, 0x09, 0x48, + 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, - 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, - 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, - 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x08, 0x71, 0x75, - 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, + 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73, + 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, + 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x34, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, + 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x47, + 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, + 0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, + 
0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72, 0x6f, 0x54, 0x68, 0x72, + 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x7a, 0x65, 0x72, 0x6f, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, + 0x45, 0x0a, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, + 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, + 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, + 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, + 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0d, + 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a, + 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, + 0x0b, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, + 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, + 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x70, + 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x70, + 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0d, 0x20, + 0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, + 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, + 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, + 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, + 0x34, 0x0a, 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x14, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62, + 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65, + 0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, + 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, + 0x45, 0x78, 0x65, 0x6d, 
0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x72, 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, + 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, + 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x22, 0x91, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a, + 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x2e, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x08, 0x71, 0x75, - 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, - 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xe3, 0x04, 0x0a, 0x09, 0x48, 0x69, 0x73, 0x74, - 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, - 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d, - 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73, 0x61, 0x6d, 0x70, - 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, - 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, - 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, - 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x34, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, - 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, - 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72, - 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x7a, 0x65, - 0x72, 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, - 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x7a, 0x65, 0x72, - 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x01, 0x52, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, - 0x6f, 0x61, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, - 0x73, 0x70, 0x61, 0x6e, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, + 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 
0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, + 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, + 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, + 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, + 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75, + 0x67, 0x65, 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x6e, 0x65, - 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, - 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, - 0x28, 0x12, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, - 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, - 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, - 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, - 0x6e, 0x52, 0x0c, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, - 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, - 0x61, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, - 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, - 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, - 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc6, 0x01, - 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, - 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x0f, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, - 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, - 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x01, 0x52, 0x14, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, - 0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 
0x28, 0x01, 0x52, 0x0a, - 0x75, 0x70, 0x70, 0x65, 0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, - 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, + 0x74, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x65, 0x72, 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, + 0x72, 0x79, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75, + 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, - 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x53, 0x70, 0x61, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, - 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, - 0x6e, 0x67, 0x74, 0x68, 0x22, 0x91, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, - 0x72, 0x12, 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, - 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, - 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, - 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x12, 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, - 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, - 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, - 0x75, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, - 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x2e, 0x47, 0x61, 0x75, 0x67, 0x65, 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, - 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, - 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, - 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, - 
0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, - 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, - 0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, - 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, - 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, - 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, - 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, - 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0c, 0x4d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, - 0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, - 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, - 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, - 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2a, - 0x62, 0x0a, 0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, - 0x07, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, - 0x55, 0x47, 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, - 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, - 0x0d, 0x0a, 0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, - 0x0a, 0x0f, 0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, - 0x4d, 0x10, 0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, - 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, - 0x75, 0x73, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, - 0x67, 0x6f, 0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, - 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74, + 0x79, 0x70, 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, + 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 
0x70, 0x72, 0x6f, + 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48, + 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, + 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, + 0x65, 0x6c, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12, + 0x34, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, + 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, + 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2a, 0x62, 0x0a, 0x0a, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4f, 0x55, + 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, 0x45, 0x10, + 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02, 0x12, 0x0b, + 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x48, + 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, 0x47, 0x41, + 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x05, 0x42, + 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f, 0x3b, 0x69, + 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, } var ( @@ -1137,26 +1178,29 @@ var file_io_prometheus_client_metrics_proto_goTypes = []interface{}{ } var file_io_prometheus_client_metrics_proto_depIdxs = []int32{ 10, // 0: io.prometheus.client.Counter.exemplar:type_name -> io.prometheus.client.Exemplar - 4, // 1: io.prometheus.client.Summary.quantile:type_name -> io.prometheus.client.Quantile - 8, // 2: io.prometheus.client.Histogram.bucket:type_name -> io.prometheus.client.Bucket - 9, // 3: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan - 9, // 4: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan - 10, // 5: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar - 1, // 6: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair - 13, // 7: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp - 1, // 8: 
io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair - 2, // 9: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge - 3, // 10: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter - 5, // 11: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary - 6, // 12: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped - 7, // 13: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram - 0, // 14: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType - 11, // 15: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric - 16, // [16:16] is the sub-list for method output_type - 16, // [16:16] is the sub-list for method input_type - 16, // [16:16] is the sub-list for extension type_name - 16, // [16:16] is the sub-list for extension extendee - 0, // [0:16] is the sub-list for field type_name + 13, // 1: io.prometheus.client.Counter.created_timestamp:type_name -> google.protobuf.Timestamp + 4, // 2: io.prometheus.client.Summary.quantile:type_name -> io.prometheus.client.Quantile + 13, // 3: io.prometheus.client.Summary.created_timestamp:type_name -> google.protobuf.Timestamp + 8, // 4: io.prometheus.client.Histogram.bucket:type_name -> io.prometheus.client.Bucket + 13, // 5: io.prometheus.client.Histogram.created_timestamp:type_name -> google.protobuf.Timestamp + 9, // 6: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan + 9, // 7: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan + 10, // 8: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar + 1, // 9: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair + 13, // 10: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp + 1, // 11: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair + 2, // 12: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge + 3, // 13: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter + 5, // 14: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary + 6, // 15: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped + 7, // 16: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram + 0, // 17: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType + 11, // 18: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric + 19, // [19:19] is the sub-list for method output_type + 19, // [19:19] is the sub-list for method input_type + 19, // [19:19] is the sub-list for extension type_name + 19, // [19:19] is the sub-list for extension extendee + 0, // [0:19] is the sub-list for field type_name } func init() { file_io_prometheus_client_metrics_proto_init() } diff --git a/vendor/github.com/prometheus/common/config/http_config.go b/vendor/github.com/prometheus/common/config/http_config.go index 37aa966748b..4763549b8ff 100644 --- a/vendor/github.com/prometheus/common/config/http_config.go +++ b/vendor/github.com/prometheus/common/config/http_config.go @@ -129,6 +129,7 @@ func (tv *TLSVersion) String() string { // BasicAuth contains basic HTTP authentication credentials. 
type BasicAuth struct { Username string `yaml:"username" json:"username"` + UsernameFile string `yaml:"username_file,omitempty" json:"username_file,omitempty"` Password Secret `yaml:"password,omitempty" json:"password,omitempty"` PasswordFile string `yaml:"password_file,omitempty" json:"password_file,omitempty"` } @@ -139,6 +140,7 @@ func (a *BasicAuth) SetDirectory(dir string) { return } a.PasswordFile = JoinDir(dir, a.PasswordFile) + a.UsernameFile = JoinDir(dir, a.UsernameFile) } // Authorization contains HTTP authorization credentials. @@ -334,6 +336,9 @@ func (c *HTTPClientConfig) Validate() error { if (c.BasicAuth != nil || c.OAuth2 != nil) && (len(c.BearerToken) > 0 || len(c.BearerTokenFile) > 0) { return fmt.Errorf("at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured") } + if c.BasicAuth != nil && (string(c.BasicAuth.Username) != "" && c.BasicAuth.UsernameFile != "") { + return fmt.Errorf("at most one of basic_auth username & username_file must be configured") + } if c.BasicAuth != nil && (string(c.BasicAuth.Password) != "" && c.BasicAuth.PasswordFile != "") { return fmt.Errorf("at most one of basic_auth password & password_file must be configured") } @@ -555,7 +560,7 @@ func NewRoundTripperFromConfig(cfg HTTPClientConfig, name string, optFuncs ...HT } if cfg.BasicAuth != nil { - rt = NewBasicAuthRoundTripper(cfg.BasicAuth.Username, cfg.BasicAuth.Password, cfg.BasicAuth.PasswordFile, rt) + rt = NewBasicAuthRoundTripper(cfg.BasicAuth.Username, cfg.BasicAuth.Password, cfg.BasicAuth.UsernameFile, cfg.BasicAuth.PasswordFile, rt) } if cfg.OAuth2 != nil { @@ -645,30 +650,43 @@ func (rt *authorizationCredentialsFileRoundTripper) CloseIdleConnections() { type basicAuthRoundTripper struct { username string password Secret + usernameFile string passwordFile string rt http.RoundTripper } // NewBasicAuthRoundTripper will apply a BASIC auth authorization header to a request unless it has // already been set. 
-func NewBasicAuthRoundTripper(username string, password Secret, passwordFile string, rt http.RoundTripper) http.RoundTripper { - return &basicAuthRoundTripper{username, password, passwordFile, rt} +func NewBasicAuthRoundTripper(username string, password Secret, usernameFile, passwordFile string, rt http.RoundTripper) http.RoundTripper { + return &basicAuthRoundTripper{username, password, usernameFile, passwordFile, rt} } func (rt *basicAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + var username string + var password string if len(req.Header.Get("Authorization")) != 0 { return rt.rt.RoundTrip(req) } - req = cloneRequest(req) + if rt.usernameFile != "" { + usernameBytes, err := os.ReadFile(rt.usernameFile) + if err != nil { + return nil, fmt.Errorf("unable to read basic auth username file %s: %s", rt.usernameFile, err) + } + username = strings.TrimSpace(string(usernameBytes)) + } else { + username = rt.username + } if rt.passwordFile != "" { - bs, err := os.ReadFile(rt.passwordFile) + passwordBytes, err := os.ReadFile(rt.passwordFile) if err != nil { return nil, fmt.Errorf("unable to read basic auth password file %s: %s", rt.passwordFile, err) } - req.SetBasicAuth(rt.username, strings.TrimSpace(string(bs))) + password = strings.TrimSpace(string(passwordBytes)) } else { - req.SetBasicAuth(rt.username, strings.TrimSpace(string(rt.password))) + password = string(rt.password) } + req = cloneRequest(req) + req.SetBasicAuth(username, password) return rt.rt.RoundTrip(req) } diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 90639781513..0ca86a3dc7c 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -22,7 +22,7 @@ import ( dto "github.com/prometheus/client_model/go" - "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/matttproud/golang_protobuf_extensions/v2/pbutil" "github.com/prometheus/common/model" ) diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index 7f611ffaad7..ca214060009 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -18,7 +18,7 @@ import ( "io" "net/http" - "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/matttproud/golang_protobuf_extensions/v2/pbutil" "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" "google.golang.org/protobuf/encoding/prototext" diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml index c24864a9273..126df9e67ac 100644 --- a/vendor/github.com/prometheus/procfs/.golangci.yml +++ b/vendor/github.com/prometheus/procfs/.golangci.yml @@ -1,9 +1,16 @@ --- linters: enable: + - errcheck - godot + - gosimple + - govet + - ineffassign - misspell - revive + - staticcheck + - testifylint + - unused linter-settings: godot: diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index 0acfb9d8063..16172923506 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -55,13 +55,13 @@ ifneq ($(shell command -v gotestsum 2> /dev/null),) endif endif -PROMU_VERSION ?= 0.15.0 +PROMU_VERSION ?= 0.17.0 PROMU_URL := 
https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.56.2 +GOLANGCI_LINT_VERSION ?= v1.59.0 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go index eb88d78aab2..83807500908 100644 --- a/vendor/github.com/prometheus/procfs/buddyinfo.go +++ b/vendor/github.com/prometheus/procfs/buddyinfo.go @@ -58,8 +58,8 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts) } - node := strings.TrimRight(parts[1], ",") - zone := strings.TrimRight(parts[3], ",") + node := strings.TrimSuffix(parts[1], ",") + zone := strings.TrimSuffix(parts[3], ",") arraySize := len(parts[4:]) if bucketCount == -1 { diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go index dd2b8988141..67a9d2b4486 100644 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -23,7 +23,7 @@ import ( var ( statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`) - recoveryLineBlocksRE = regexp.MustCompile(`\((\d+)/\d+\)`) + recoveryLineBlocksRE = regexp.MustCompile(`\((\d+/\d+)\)`) recoveryLinePctRE = regexp.MustCompile(`= (.+)%`) recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`) recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`) @@ -50,6 +50,8 @@ type MDStat struct { BlocksTotal int64 // Number of blocks on the device that are in sync. BlocksSynced int64 + // Number of blocks on the device that need to be synced. + BlocksToBeSynced int64 // progress percentage of current sync BlocksSyncedPct float64 // estimated finishing time for current sync (in minutes) @@ -115,7 +117,8 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { // If device is syncing at the moment, get the number of currently // synced bytes, otherwise that number equals the size of the device. - syncedBlocks := size + blocksSynced := size + blocksToBeSynced := size speed := float64(0) finish := float64(0) pct := float64(0) @@ -136,9 +139,9 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { // Handle case when resync=PENDING or resync=DELAYED. 
if strings.Contains(lines[syncLineIdx], "PENDING") || strings.Contains(lines[syncLineIdx], "DELAYED") { - syncedBlocks = 0 + blocksSynced = 0 } else { - syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx]) + blocksSynced, blocksToBeSynced, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx]) if err != nil { return nil, fmt.Errorf("%w: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err) } @@ -154,7 +157,8 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { DisksSpare: spare, DisksTotal: total, BlocksTotal: size, - BlocksSynced: syncedBlocks, + BlocksSynced: blocksSynced, + BlocksToBeSynced: blocksToBeSynced, BlocksSyncedPct: pct, BlocksSyncedFinishTime: finish, BlocksSyncedSpeed: speed, @@ -206,48 +210,54 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size in return active, total, down, size, nil } -func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) { +func evalRecoveryLine(recoveryLine string) (blocksSynced int64, blocksToBeSynced int64, pct float64, finish float64, speed float64, err error) { matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine %s: %w", ErrFileParse, recoveryLine, err) + return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine blocks %s: %w", ErrFileParse, recoveryLine, err) } - syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) + blocks := strings.Split(matches[1], "/") + blocksSynced, err = strconv.ParseInt(blocks[0], 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected parsing of recoveryLine %q: %w", ErrFileParse, recoveryLine, err) + return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery blocks synced %q: %w", ErrFileParse, matches[1], err) + } + + blocksToBeSynced, err = strconv.ParseInt(blocks[1], 10, 64) + if err != nil { + return blocksSynced, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery to be synced blocks %q: %w", ErrFileParse, matches[2], err) } // Get percentage complete matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine) + return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine) } pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64) if err != nil { - return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine) + return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine) } // Get time expected left to complete matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine) + return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. 
finish time: %s", ErrFileParse, recoveryLine) } finish, err = strconv.ParseFloat(matches[1], 64) if err != nil { - return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine) + return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine) } // Get recovery speed matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine) + return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine) } speed, err = strconv.ParseFloat(matches[1], 64) if err != nil { - return syncedBlocks, pct, finish, 0, fmt.Errorf("%w: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err) + return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err) } - return syncedBlocks, pct, finish, speed, nil + return blocksSynced, blocksToBeSynced, pct, finish, speed, nil } func evalComponentDevices(deviceFields []string) []string { diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index 2f54e77c753..75a3b6c810f 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -88,7 +88,7 @@ type MountStatsNFS struct { // Statistics broken down by filesystem operation. Operations []NFSOperationStats // Statistics about the NFS RPC transport. - Transport NFSTransportStats + Transport []NFSTransportStats } // mountStats implements MountStats. @@ -432,7 +432,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e return nil, err } - stats.Transport = *tstats + stats.Transport = append(stats.Transport, *tstats) } // When encountering "per-operation statistics", we must break this diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index 0e8c4fa0b01..142796368fe 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -137,7 +137,7 @@ func (p Proc) CmdLine() ([]string, error) { return []string{}, nil } - return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil + return strings.Split(string(bytes.TrimRight(data, "\x00")), "\x00"), nil } // Wchan returns the wchan (wait channel) of a process. 
diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go index ad8785a407a..09060e82080 100644 --- a/vendor/github.com/prometheus/procfs/proc_smaps.go +++ b/vendor/github.com/prometheus/procfs/proc_smaps.go @@ -127,7 +127,7 @@ func (s *ProcSMapsRollup) parseLine(line string) error { } v := strings.TrimSpace(kv[1]) - v = strings.TrimRight(v, " kB") + v = strings.TrimSuffix(v, " kB") vKBytes, err := strconv.ParseUint(v, 10, 64) if err != nil { diff --git a/vendor/github.com/rabbitmq/amqp091-go/CHANGELOG.md b/vendor/github.com/rabbitmq/amqp091-go/CHANGELOG.md index db633d44a0b..fd03c1f9beb 100644 --- a/vendor/github.com/rabbitmq/amqp091-go/CHANGELOG.md +++ b/vendor/github.com/rabbitmq/amqp091-go/CHANGELOG.md @@ -1,5 +1,73 @@ # Changelog +## [v1.10.0](https://github.com/rabbitmq/amqp091-go/tree/v1.10.0) (2024-05-08) + +[Full Changelog](https://github.com/rabbitmq/amqp091-go/compare/v1.9.0...v1.10.0) + +**Implemented enhancements:** + +- Undeprecate non-context publish functions [\#259](https://github.com/rabbitmq/amqp091-go/pull/259) ([Zerpet](https://github.com/Zerpet)) +- Update Go directive [\#257](https://github.com/rabbitmq/amqp091-go/pull/257) ([Zerpet](https://github.com/Zerpet)) + +**Fixed bugs:** + +- republishing on reconnect bug in the example [\#249](https://github.com/rabbitmq/amqp091-go/issues/249) +- Channel Notify Close not receive event when connection is closed by RMQ server. [\#241](https://github.com/rabbitmq/amqp091-go/issues/241) +- Inconsistent documentation [\#231](https://github.com/rabbitmq/amqp091-go/issues/231) +- Data race in the client example [\#72](https://github.com/rabbitmq/amqp091-go/issues/72) +- Fix string function of URI [\#258](https://github.com/rabbitmq/amqp091-go/pull/258) ([Zerpet](https://github.com/Zerpet)) + +**Closed issues:** + +- Documentation needed \(`PublishWithContext` does not use context\) [\#195](https://github.com/rabbitmq/amqp091-go/issues/195) +- concurrent dispatch data race [\#226](https://github.com/rabbitmq/amqp091-go/issues/226) + +**Merged pull requests:** + +- Fix data race in example [\#260](https://github.com/rabbitmq/amqp091-go/pull/260) ([Zerpet](https://github.com/Zerpet)) +- Address CodeQL warning [\#252](https://github.com/rabbitmq/amqp091-go/pull/252) ([lukebakken](https://github.com/lukebakken)) +- Add support for additional AMQP URI query parameters [\#251](https://github.com/rabbitmq/amqp091-go/pull/251) ([vilius-g](https://github.com/vilius-g)) +- Example fix [\#250](https://github.com/rabbitmq/amqp091-go/pull/250) ([Boris-Plato](https://github.com/Boris-Plato)) +- Increasing the code coverage [\#248](https://github.com/rabbitmq/amqp091-go/pull/248) ([edercarloscosta](https://github.com/edercarloscosta)) +- Use correct mutex to guard confirms.published [\#240](https://github.com/rabbitmq/amqp091-go/pull/240) ([hjr265](https://github.com/hjr265)) +- Documenting Publishing.Expiration usage [\#232](https://github.com/rabbitmq/amqp091-go/pull/232) ([niksteff](https://github.com/niksteff)) +- fix comment typo in example\_client\_test.go [\#228](https://github.com/rabbitmq/amqp091-go/pull/228) ([wisaTong](https://github.com/wisaTong)) +- Bump go.uber.org/goleak from 1.2.1 to 1.3.0 [\#227](https://github.com/rabbitmq/amqp091-go/pull/227) ([dependabot[bot]](https://github.com/apps/dependabot)) + +## [v1.9.0](https://github.com/rabbitmq/amqp091-go/tree/v1.9.0) (2023-10-02) + +[Full Changelog](https://github.com/rabbitmq/amqp091-go/compare/v1.8.1...v1.9.0) + 
+**Implemented enhancements:** + +- Use of buffered delivery channels when prefetch\_count is not null [\#200](https://github.com/rabbitmq/amqp091-go/issues/200) + +**Fixed bugs:** + +- connection block when write connection reset by peer [\#222](https://github.com/rabbitmq/amqp091-go/issues/222) +- Test failure on 32bit architectures [\#202](https://github.com/rabbitmq/amqp091-go/issues/202) + +**Closed issues:** + +- Add a constant to set consumer timeout as queue argument [\#201](https://github.com/rabbitmq/amqp091-go/issues/201) +- Add a constant for CQ version [\#199](https://github.com/rabbitmq/amqp091-go/issues/199) +- Examples may need to be updated after \#140 [\#153](https://github.com/rabbitmq/amqp091-go/issues/153) + +**Merged pull requests:** + +- Update spec091.go [\#224](https://github.com/rabbitmq/amqp091-go/pull/224) ([pinkfish](https://github.com/pinkfish)) +- Closes 222 [\#223](https://github.com/rabbitmq/amqp091-go/pull/223) ([yywing](https://github.com/yywing)) +- Update write.go [\#221](https://github.com/rabbitmq/amqp091-go/pull/221) ([pinkfish](https://github.com/pinkfish)) +- Bump versions [\#219](https://github.com/rabbitmq/amqp091-go/pull/219) ([lukebakken](https://github.com/lukebakken)) +- remove extra word 'accept' from ExchangeDeclare description [\#217](https://github.com/rabbitmq/amqp091-go/pull/217) ([a-sabzian](https://github.com/a-sabzian)) +- Misc Windows CI updates [\#216](https://github.com/rabbitmq/amqp091-go/pull/216) ([lukebakken](https://github.com/lukebakken)) +- Stop using deprecated Publish function [\#207](https://github.com/rabbitmq/amqp091-go/pull/207) ([Zerpet](https://github.com/Zerpet)) +- Constant for consumer timeout queue argument [\#206](https://github.com/rabbitmq/amqp091-go/pull/206) ([Zerpet](https://github.com/Zerpet)) +- Add a constant for CQ v2 queue argument [\#205](https://github.com/rabbitmq/amqp091-go/pull/205) ([Zerpet](https://github.com/Zerpet)) +- Fix example for 32-bit compatibility [\#204](https://github.com/rabbitmq/amqp091-go/pull/204) ([Zerpet](https://github.com/Zerpet)) +- Fix to increase timeout milliseconds since it's too tight [\#203](https://github.com/rabbitmq/amqp091-go/pull/203) ([t2y](https://github.com/t2y)) +- Add Channel.ConsumeWithContext to be able to cancel delivering [\#192](https://github.com/rabbitmq/amqp091-go/pull/192) ([t2y](https://github.com/t2y)) + ## [v1.8.1](https://github.com/rabbitmq/amqp091-go/tree/v1.8.1) (2023-05-04) [Full Changelog](https://github.com/rabbitmq/amqp091-go/compare/v1.8.0...v1.8.1) diff --git a/vendor/github.com/rabbitmq/amqp091-go/channel.go b/vendor/github.com/rabbitmq/amqp091-go/channel.go index 0dcec902555..3dfd7faf940 100644 --- a/vendor/github.com/rabbitmq/amqp091-go/channel.go +++ b/vendor/github.com/rabbitmq/amqp091-go/channel.go @@ -7,7 +7,6 @@ package amqp091 import ( "context" - "errors" "reflect" "sync" "sync/atomic" @@ -971,9 +970,6 @@ func (ch *Channel) QueueBind(name, key, exchange string, noWait bool, args Table /* QueueUnbind removes a binding between an exchange and queue matching the key and arguments. - -It is possible to send and empty string for the exchange name which means to -unbind the queue from the default exchange. */ func (ch *Channel) QueueUnbind(name, key, exchange string, args Table) error { if err := args.Validate(); err != nil { @@ -1487,17 +1483,17 @@ confirmations start at 1. Exit when all publishings are confirmed. 
When Publish does not return an error and the channel is in confirm mode, the internal counter for DeliveryTags with the first confirmation starts at 1. - -Deprecated: Use PublishWithContext instead. */ func (ch *Channel) Publish(exchange, key string, mandatory, immediate bool, msg Publishing) error { - _, err := ch.PublishWithDeferredConfirmWithContext(context.Background(), exchange, key, mandatory, immediate, msg) + _, err := ch.PublishWithDeferredConfirm(exchange, key, mandatory, immediate, msg) return err } /* PublishWithContext sends a Publishing from the client to an exchange on the server. +NOTE: this function is equivalent to [Channel.Publish]. Context is not honoured. + When you want a single message to be delivered to a single queue, you can publish to the default exchange with the routingKey of the queue name. This is because every declared queue gets an implicit route to the default exchange. @@ -1527,34 +1523,17 @@ confirmations start at 1. Exit when all publishings are confirmed. When Publish does not return an error and the channel is in confirm mode, the internal counter for DeliveryTags with the first confirmation starts at 1. */ -func (ch *Channel) PublishWithContext(ctx context.Context, exchange, key string, mandatory, immediate bool, msg Publishing) error { - _, err := ch.PublishWithDeferredConfirmWithContext(ctx, exchange, key, mandatory, immediate, msg) - return err +func (ch *Channel) PublishWithContext(_ context.Context, exchange, key string, mandatory, immediate bool, msg Publishing) error { + return ch.Publish(exchange, key, mandatory, immediate, msg) } /* -PublishWithDeferredConfirm behaves identically to Publish but additionally returns a -DeferredConfirmation, allowing the caller to wait on the publisher confirmation -for this message. If the channel has not been put into confirm mode, -the DeferredConfirmation will be nil. - -Deprecated: Use PublishWithDeferredConfirmWithContext instead. +PublishWithDeferredConfirm behaves identically to Publish, but additionally +returns a DeferredConfirmation, allowing the caller to wait on the publisher +confirmation for this message. If the channel has not been put into confirm +mode, the DeferredConfirmation will be nil. */ func (ch *Channel) PublishWithDeferredConfirm(exchange, key string, mandatory, immediate bool, msg Publishing) (*DeferredConfirmation, error) { - return ch.PublishWithDeferredConfirmWithContext(context.Background(), exchange, key, mandatory, immediate, msg) -} - -/* -PublishWithDeferredConfirmWithContext behaves identically to Publish but additionally returns a -DeferredConfirmation, allowing the caller to wait on the publisher confirmation -for this message. If the channel has not been put into confirm mode, -the DeferredConfirmation will be nil. -*/ -func (ch *Channel) PublishWithDeferredConfirmWithContext(ctx context.Context, exchange, key string, mandatory, immediate bool, msg Publishing) (*DeferredConfirmation, error) { - if ctx == nil { - return nil, errors.New("amqp091-go: nil Context") - } - if err := msg.Headers.Validate(); err != nil { return nil, err } @@ -1598,6 +1577,19 @@ func (ch *Channel) PublishWithDeferredConfirmWithContext(ctx context.Context, ex return dc, nil } +/* +PublishWithDeferredConfirmWithContext behaves identically to Publish but additionally returns a +DeferredConfirmation, allowing the caller to wait on the publisher confirmation +for this message. If the channel has not been put into confirm mode, +the DeferredConfirmation will be nil. 
+ +NOTE: PublishWithDeferredConfirmWithContext is equivalent to its non-context variant. The context passed +to this function is not honoured. +*/ +func (ch *Channel) PublishWithDeferredConfirmWithContext(_ context.Context, exchange, key string, mandatory, immediate bool, msg Publishing) (*DeferredConfirmation, error) { + return ch.PublishWithDeferredConfirm(exchange, key, mandatory, immediate, msg) +} + /* Get synchronously receives a single Delivery from the head of a queue from the server to the client. In almost all cases, using Channel.Consume will be @@ -1829,8 +1821,8 @@ func (ch *Channel) Reject(tag uint64, requeue bool) error { // GetNextPublishSeqNo returns the sequence number of the next message to be // published, when in confirm mode. func (ch *Channel) GetNextPublishSeqNo() uint64 { - ch.confirms.m.Lock() - defer ch.confirms.m.Unlock() + ch.confirms.publishedMut.Lock() + defer ch.confirms.publishedMut.Unlock() return ch.confirms.published + 1 } diff --git a/vendor/github.com/rabbitmq/amqp091-go/connection.go b/vendor/github.com/rabbitmq/amqp091-go/connection.go index c8bb820d158..e167a23fcba 100644 --- a/vendor/github.com/rabbitmq/amqp091-go/connection.go +++ b/vendor/github.com/rabbitmq/amqp091-go/connection.go @@ -28,11 +28,11 @@ const ( defaultHeartbeat = 10 * time.Second defaultConnectionTimeout = 30 * time.Second defaultProduct = "AMQP 0.9.1 Client" - buildVersion = "1.9.0" + buildVersion = "1.10.0" platform = "golang" // Safer default that makes channel leaks a lot easier to spot // before they create operational headaches. See https://github.com/rabbitmq/rabbitmq-server/issues/1593. - defaultChannelMax = (2 << 10) - 1 + defaultChannelMax = uint16((2 << 10) - 1) defaultLocale = "en_US" ) @@ -49,7 +49,7 @@ type Config struct { // bindings on the server. Dial sets this to the path parsed from the URL. Vhost string - ChannelMax int // 0 max channels means 2^16 - 1 + ChannelMax uint16 // 0 max channels means 2^16 - 1 FrameSize int // 0 max bytes means unlimited Heartbeat time.Duration // less than 1s uses the server's interval @@ -157,8 +157,7 @@ func DefaultDial(connectionTimeout time.Duration) func(network, addr string) (ne // scheme. It is equivalent to calling DialTLS(amqp, nil). func Dial(url string) (*Connection, error) { return DialConfig(url, Config{ - Heartbeat: defaultHeartbeat, - Locale: defaultLocale, + Locale: defaultLocale, }) } @@ -169,7 +168,6 @@ func Dial(url string) (*Connection, error) { // DialTLS uses the provided tls.Config when encountering an amqps:// scheme. func DialTLS(url string, amqps *tls.Config) (*Connection, error) { return DialConfig(url, Config{ - Heartbeat: defaultHeartbeat, TLSClientConfig: amqps, Locale: defaultLocale, }) @@ -186,7 +184,6 @@ func DialTLS(url string, amqps *tls.Config) (*Connection, error) { // amqps:// scheme. func DialTLS_ExternalAuth(url string, amqps *tls.Config) (*Connection, error) { return DialConfig(url, Config{ - Heartbeat: defaultHeartbeat, TLSClientConfig: amqps, SASL: []Authentication{&ExternalAuth{}}, }) @@ -195,7 +192,9 @@ func DialTLS_ExternalAuth(url string, amqps *tls.Config) (*Connection, error) { // DialConfig accepts a string in the AMQP URI format and a configuration for // the transport and connection setup, returning a new Connection. Defaults to // a server heartbeat interval of 10 seconds and sets the initial read deadline -// to 30 seconds. +// to 30 seconds. The heartbeat interval specified in the AMQP URI takes precedence +// over the value specified in the config. 
To disable heartbeats, you must use +// the AMQP URI and set heartbeat=0 there. func DialConfig(url string, config Config) (*Connection, error) { var err error var conn net.Conn @@ -206,18 +205,50 @@ func DialConfig(url string, config Config) (*Connection, error) { } if config.SASL == nil { - config.SASL = []Authentication{uri.PlainAuth()} + if uri.AuthMechanism != nil { + for _, identifier := range uri.AuthMechanism { + switch strings.ToUpper(identifier) { + case "PLAIN": + config.SASL = append(config.SASL, uri.PlainAuth()) + case "AMQPLAIN": + config.SASL = append(config.SASL, uri.AMQPlainAuth()) + case "EXTERNAL": + config.SASL = append(config.SASL, &ExternalAuth{}) + default: + return nil, fmt.Errorf("unsupported auth_mechanism: %v", identifier) + } + } + } else { + config.SASL = []Authentication{uri.PlainAuth()} + } } if config.Vhost == "" { config.Vhost = uri.Vhost } + if uri.Heartbeat.hasValue { + config.Heartbeat = uri.Heartbeat.value + } else { + if config.Heartbeat == 0 { + config.Heartbeat = defaultHeartbeat + } + } + + if config.ChannelMax == 0 { + config.ChannelMax = uri.ChannelMax + } + + connectionTimeout := defaultConnectionTimeout + if uri.ConnectionTimeout != 0 { + connectionTimeout = time.Duration(uri.ConnectionTimeout) * time.Millisecond + } + addr := net.JoinHostPort(uri.Host, strconv.FormatInt(int64(uri.Port), 10)) dialer := config.Dial if dialer == nil { - dialer = DefaultDial(defaultConnectionTimeout) + dialer = DefaultDial(connectionTimeout) } conn, err = dialer("tcp", addr) @@ -991,13 +1022,13 @@ func (c *Connection) openTune(config Config, auth Authentication) error { // When the server and client both use default 0, then the max channel is // only limited by uint16. - c.Config.ChannelMax = pick(config.ChannelMax, int(tune.ChannelMax)) + c.Config.ChannelMax = pickUInt16(config.ChannelMax, tune.ChannelMax) if c.Config.ChannelMax == 0 { c.Config.ChannelMax = defaultChannelMax } - c.Config.ChannelMax = min(c.Config.ChannelMax, maxChannelMax) + c.Config.ChannelMax = minUInt16(c.Config.ChannelMax, maxChannelMax) - c.allocator = newAllocator(1, c.Config.ChannelMax) + c.allocator = newAllocator(1, int(c.Config.ChannelMax)) c.m.Unlock() @@ -1104,6 +1135,13 @@ func max(a, b int) int { return b } +func maxUInt16(a, b uint16) uint16 { + if a > b { + return a + } + return b +} + func min(a, b int) int { if a < b { return a @@ -1111,6 +1149,21 @@ func min(a, b int) int { return b } +func minUInt16(a, b uint16) uint16 { + if a < b { + return a + } + return b +} + +func pickUInt16(client, server uint16) uint16 { + if client == 0 || server == 0 { + return maxUInt16(client, server) + } else { + return minUInt16(client, server) + } +} + func pick(client, server int) int { if client == 0 || server == 0 { return max(client, server) diff --git a/vendor/github.com/rabbitmq/amqp091-go/doc.go b/vendor/github.com/rabbitmq/amqp091-go/doc.go index 8cb0b64f041..461173fcc5e 100644 --- a/vendor/github.com/rabbitmq/amqp091-go/doc.go +++ b/vendor/github.com/rabbitmq/amqp091-go/doc.go @@ -95,12 +95,11 @@ prior to calling [Channel.PublishWithContext] or [Channel.Consume]. When Dial encounters an amqps:// scheme, it will use the zero value of a tls.Config. This will only perform server certificate and host verification. -Use DialTLS when you wish to provide a client certificate (recommended), -include a private certificate authority's certificate in the cert chain for -server validity, or run insecure by not verifying the server certificate dial -your own connection. 
DialTLS will use the provided tls.Config when it -encounters an amqps:// scheme and will dial a plain connection when it -encounters an amqp:// scheme. +Use DialTLS when you wish to provide a client certificate (recommended), include +a private certificate authority's certificate in the cert chain for server +validity, or run insecure by not verifying the server certificate. DialTLS will +use the provided tls.Config when it encounters an amqps:// scheme and will dial +a plain connection when it encounters an amqp:// scheme. SSL/TLS in RabbitMQ is documented here: http://www.rabbitmq.com/ssl.html @@ -110,17 +109,18 @@ In order to be notified when a connection or channel gets closed, both structures offer the possibility to register channels using [Channel.NotifyClose] and [Connection.NotifyClose] functions: - notifyConnCloseCh := conn.NotifyClose(make(chan *amqp.Error)) + notifyConnCloseCh := conn.NotifyClose(make(chan *amqp.Error, 1)) No errors will be sent in case of a graceful connection close. In case of a non-graceful closure due to e.g. network issue, or forced connection closure from the Management UI, the error will be notified synchronously by the library. -The error is sent synchronously to the channel, so that the flow will wait until -the receiver consumes from the channel. To avoid deadlocks in the library, it is -necessary to consume from the channels. This could be done inside a -different goroutine with a select listening on the two channels inside a for -loop like: +The library sends to notification channels just once. After sending a +notification to all channels, the library closes all registered notification +channels. After receiving a notification, the application should create and +register a new channel. To avoid deadlocks in the library, it is necessary to +consume from the channels. This could be done inside a different goroutine with +a select listening on the two channels inside a for loop like: go func() { for notifyConnClose != nil || notifyChanClose != nil { @@ -141,13 +141,8 @@ loop like: } }() -Another approach is to use buffered channels: - - notifyConnCloseCh := conn.NotifyClose(make(chan *amqp.Error, 1)) - -The library sends to notification channels just once. After sending a notification -to all channels, the library closes all registered notification channels. After -receiving a notification, the application should create and register a new channel. +It is strongly recommended to use buffered channels to avoid deadlocks inside +the library. # Best practises for NotifyPublish notifications: diff --git a/vendor/github.com/rabbitmq/amqp091-go/types.go b/vendor/github.com/rabbitmq/amqp091-go/types.go index 8f43a726f84..1e15ed0ab76 100644 --- a/vendor/github.com/rabbitmq/amqp091-go/types.go +++ b/vendor/github.com/rabbitmq/amqp091-go/types.go @@ -144,6 +144,19 @@ const ( flagReserved1 = 0x0004 ) +// Expiration. These constants can be used to set a messages expiration TTL. +// They should be viewed as a clarification of the expiration functionality in +// messages and their usage is not enforced by this pkg. +// +// The server requires a string value that is interpreted by the server as +// milliseconds. If no value is set, which translates to the nil value of +// string, the message will never expire by itself. This does not influence queue +// configured TTL configurations. 
+const ( + NeverExpire string = "" // empty value means never expire + ImmediatelyExpire string = "0" // 0 means immediately expire +) + // Queue captures the current server state of the queue on the server returned // from Channel.QueueDeclare or Channel.QueueInspect. type Queue struct { @@ -162,18 +175,25 @@ type Publishing struct { Headers Table // Properties - ContentType string // MIME content type - ContentEncoding string // MIME content encoding - DeliveryMode uint8 // Transient (0 or 1) or Persistent (2) - Priority uint8 // 0 to 9 - CorrelationId string // correlation identifier - ReplyTo string // address to to reply to (ex: RPC) - Expiration string // message expiration spec - MessageId string // message identifier - Timestamp time.Time // message timestamp - Type string // message type name - UserId string // creating user id - ex: "guest" - AppId string // creating application id + ContentType string // MIME content type + ContentEncoding string // MIME content encoding + DeliveryMode uint8 // Transient (0 or 1) or Persistent (2) + Priority uint8 // 0 to 9 + CorrelationId string // correlation identifier + ReplyTo string // address to to reply to (ex: RPC) + // Expiration represents the message TTL in milliseconds. A value of "0" + // indicates that the message will immediately expire if the message arrives + // at its destination and the message is not directly handled by a consumer + // that currently has the capacatity to do so. If you wish the message to + // not expire on its own, set this value to any ttl value, empty string or + // use the corresponding constants NeverExpire and ImmediatelyExpire. This + // does not influence queue configured TTL values. + Expiration string + MessageId string // message identifier + Timestamp time.Time // message timestamp + Type string // message type name + UserId string // creating user id - ex: "guest" + AppId string // creating application id // The application specific payload of the message Body []byte @@ -533,3 +553,16 @@ type bodyFrame struct { } func (f *bodyFrame) channel() uint16 { return f.ChannelId } + +type heartbeatDuration struct { + value time.Duration + hasValue bool +} + +func newHeartbeatDurationFromSeconds(s int) heartbeatDuration { + v := time.Duration(s) * time.Second + return heartbeatDuration{ + value: v, + hasValue: true, + } +} diff --git a/vendor/github.com/rabbitmq/amqp091-go/uri.go b/vendor/github.com/rabbitmq/amqp091-go/uri.go index 87ef09e6fef..ddc4b1adbfc 100644 --- a/vendor/github.com/rabbitmq/amqp091-go/uri.go +++ b/vendor/github.com/rabbitmq/amqp091-go/uri.go @@ -7,6 +7,7 @@ package amqp091 import ( "errors" + "fmt" "net" "net/url" "strconv" @@ -32,16 +33,20 @@ var defaultURI = URI{ // URI represents a parsed AMQP URI string. 
type URI struct { - Scheme string - Host string - Port int - Username string - Password string - Vhost string - CertFile string // client TLS auth - path to certificate (PEM) - CACertFile string // client TLS auth - path to CA certificate (PEM) - KeyFile string // client TLS auth - path to private key (PEM) - ServerName string // client TLS auth - server name + Scheme string + Host string + Port int + Username string + Password string + Vhost string + CertFile string // client TLS auth - path to certificate (PEM) + CACertFile string // client TLS auth - path to CA certificate (PEM) + KeyFile string // client TLS auth - path to private key (PEM) + ServerName string // client TLS auth - server name + AuthMechanism []string + Heartbeat heartbeatDuration + ConnectionTimeout int + ChannelMax uint16 } // ParseURI attempts to parse the given AMQP URI according to the spec. @@ -62,6 +67,10 @@ type URI struct { // keyfile: // cacertfile: // server_name_indication: +// auth_mechanism: +// heartbeat: +// connection_timeout: +// channel_max: // // If cacertfile is not provided, system CA certificates will be used. // Mutual TLS (client auth) will be enabled only in case keyfile AND certfile provided. @@ -134,6 +143,31 @@ func ParseURI(uri string) (URI, error) { builder.KeyFile = params.Get("keyfile") builder.CACertFile = params.Get("cacertfile") builder.ServerName = params.Get("server_name_indication") + builder.AuthMechanism = params["auth_mechanism"] + + if params.Has("heartbeat") { + value, err := strconv.Atoi(params.Get("heartbeat")) + if err != nil { + return builder, fmt.Errorf("heartbeat is not an integer: %v", err) + } + builder.Heartbeat = newHeartbeatDurationFromSeconds(value) + } + + if params.Has("connection_timeout") { + value, err := strconv.Atoi(params.Get("connection_timeout")) + if err != nil { + return builder, fmt.Errorf("connection_timeout is not an integer: %v", err) + } + builder.ConnectionTimeout = value + } + + if params.Has("channel_max") { + value, err := strconv.ParseUint(params.Get("channel_max"), 10, 16) + if err != nil { + return builder, fmt.Errorf("connection_timeout is not an integer: %v", err) + } + builder.ChannelMax = uint16(value) + } return builder, nil } @@ -192,5 +226,29 @@ func (uri URI) String() string { authority.Path = "/" } + if uri.CertFile != "" || uri.KeyFile != "" || uri.CACertFile != "" || uri.ServerName != "" { + rawQuery := strings.Builder{} + if uri.CertFile != "" { + rawQuery.WriteString("certfile=") + rawQuery.WriteString(uri.CertFile) + rawQuery.WriteRune('&') + } + if uri.KeyFile != "" { + rawQuery.WriteString("keyfile=") + rawQuery.WriteString(uri.KeyFile) + rawQuery.WriteRune('&') + } + if uri.CACertFile != "" { + rawQuery.WriteString("cacertfile=") + rawQuery.WriteString(uri.CACertFile) + rawQuery.WriteRune('&') + } + if uri.ServerName != "" { + rawQuery.WriteString("server_name_indication=") + rawQuery.WriteString(uri.ServerName) + } + authority.RawQuery = rawQuery.String() + } + return authority.String() } diff --git a/vendor/github.com/redis/go-redis/v9/CHANGELOG.md b/vendor/github.com/redis/go-redis/v9/CHANGELOG.md index 297438a9fc4..e1652b179ad 100644 --- a/vendor/github.com/redis/go-redis/v9/CHANGELOG.md +++ b/vendor/github.com/redis/go-redis/v9/CHANGELOG.md @@ -1,3 +1,12 @@ +## Unreleased + +### Changed + +* `go-redis` won't skip span creation if the parent spans is not recording. ([#2980](https://github.com/redis/go-redis/issues/2980)) + Users can use the OpenTelemetry sampler to control the sampling behavior. 
+ For instance, you can use the `ParentBased(NeverSample())` sampler from `go.opentelemetry.io/otel/sdk/trace` to keep + a similar behavior (drop orphan spans) of `go-redis` as before. + ## [9.0.5](https://github.com/redis/go-redis/compare/v9.0.4...v9.0.5) (2023-05-29) diff --git a/vendor/github.com/redis/go-redis/v9/Makefile b/vendor/github.com/redis/go-redis/v9/Makefile index dc2fe780aab..d8d007596bb 100644 --- a/vendor/github.com/redis/go-redis/v9/Makefile +++ b/vendor/github.com/redis/go-redis/v9/Makefile @@ -1,7 +1,12 @@ GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort) test: testdeps + $(eval GO_VERSION := $(shell go version | cut -d " " -f 3 | cut -d. -f2)) set -e; for dir in $(GO_MOD_DIRS); do \ + if echo "$${dir}" | grep -q "./example" && [ "$(GO_VERSION)" = "19" ]; then \ + echo "Skipping go test in $${dir} due to Go version 1.19 and dir contains ./example"; \ + continue; \ + fi; \ echo "go test in $${dir}"; \ (cd "$${dir}" && \ go mod tidy -compat=1.18 && \ @@ -26,7 +31,7 @@ build: testdata/redis: mkdir -p $@ - wget -qO- https://download.redis.io/releases/redis-7.2.1.tar.gz | tar xvz --strip-components=1 -C $@ + wget -qO- https://download.redis.io/releases/redis-7.4-rc2.tar.gz | tar xvz --strip-components=1 -C $@ testdata/redis/src/redis-server: testdata/redis cd $< && make all diff --git a/vendor/github.com/redis/go-redis/v9/README.md b/vendor/github.com/redis/go-redis/v9/README.md index 043d3f0e6d5..e7df5dfd607 100644 --- a/vendor/github.com/redis/go-redis/v9/README.md +++ b/vendor/github.com/redis/go-redis/v9/README.md @@ -143,9 +143,6 @@ to this specification. ```go import ( - "context" - "fmt" - "github.com/redis/go-redis/v9" ) diff --git a/vendor/github.com/redis/go-redis/v9/bitmap_commands.go b/vendor/github.com/redis/go-redis/v9/bitmap_commands.go index d9fc50dce51..a215582890e 100644 --- a/vendor/github.com/redis/go-redis/v9/bitmap_commands.go +++ b/vendor/github.com/redis/go-redis/v9/bitmap_commands.go @@ -16,6 +16,7 @@ type BitMapCmdable interface { BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd BitPosSpan(ctx context.Context, key string, bit int8, start, end int64, span string) *IntCmd BitField(ctx context.Context, key string, values ...interface{}) *IntSliceCmd + BitFieldRO(ctx context.Context, key string, values ...interface{}) *IntSliceCmd } func (c cmdable) GetBit(ctx context.Context, key string, offset int64) *IntCmd { @@ -45,22 +46,19 @@ const BitCountIndexByte string = "BYTE" const BitCountIndexBit string = "BIT" func (c cmdable) BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd { - args := []interface{}{"bitcount", key} + args := make([]any, 2, 5) + args[0] = "bitcount" + args[1] = key if bitCount != nil { - if bitCount.Unit == "" { - bitCount.Unit = "BYTE" - } - if bitCount.Unit != BitCountIndexByte && bitCount.Unit != BitCountIndexBit { - cmd := NewIntCmd(ctx) - cmd.SetErr(errors.New("redis: invalid bitcount index")) - return cmd + args = append(args, bitCount.Start, bitCount.End) + if bitCount.Unit != "" { + if bitCount.Unit != BitCountIndexByte && bitCount.Unit != BitCountIndexBit { + cmd := NewIntCmd(ctx) + cmd.SetErr(errors.New("redis: invalid bitcount index")) + return cmd + } + args = append(args, bitCount.Unit) } - args = append( - args, - bitCount.Start, - bitCount.End, - string(bitCount.Unit), - ) } cmd := NewIntCmd(ctx, args...) 
_ = c(ctx, cmd) diff --git a/vendor/github.com/redis/go-redis/v9/command.go b/vendor/github.com/redis/go-redis/v9/command.go index 9fb9a8310b7..59ba0896952 100644 --- a/vendor/github.com/redis/go-redis/v9/command.go +++ b/vendor/github.com/redis/go-redis/v9/command.go @@ -573,6 +573,10 @@ func (cmd *StatusCmd) Result() (string, error) { return cmd.val, cmd.err } +func (cmd *StatusCmd) Bytes() ([]byte, error) { + return util.StringToBytes(cmd.val), cmd.err +} + func (cmd *StatusCmd) String() string { return cmdString(cmd, cmd.val) } @@ -4997,6 +5001,7 @@ type ClientInfo struct { PSub int // number of pattern matching subscriptions SSub int // redis version 7.0.3, number of shard channel subscriptions Multi int // number of commands in a MULTI/EXEC context + Watch int // redis version 7.4 RC1, number of keys this client is currently watching. QueryBuf int // qbuf, query buffer length (0 means no query pending) QueryBufFree int // qbuf-free, free space of the query buffer (0 means the buffer is full) ArgvMem int // incomplete arguments for the next command (already extracted from query buffer) @@ -5149,6 +5154,8 @@ func parseClientInfo(txt string) (info *ClientInfo, err error) { info.SSub, err = strconv.Atoi(val) case "multi": info.Multi, err = strconv.Atoi(val) + case "watch": + info.Watch, err = strconv.Atoi(val) case "qbuf": info.QueryBuf, err = strconv.Atoi(val) case "qbuf-free": @@ -5454,9 +5461,12 @@ func (cmd *MonitorCmd) readMonitor(rd *proto.Reader, cancel context.CancelFunc) for { cmd.mu.Lock() st := cmd.status + pk, _ := rd.Peek(1) cmd.mu.Unlock() - if pk, _ := rd.Peek(1); len(pk) != 0 && st == monitorStatusStart { + if len(pk) != 0 && st == monitorStatusStart { + cmd.mu.Lock() line, err := rd.ReadString() + cmd.mu.Unlock() if err != nil { return err } diff --git a/vendor/github.com/redis/go-redis/v9/error.go b/vendor/github.com/redis/go-redis/v9/error.go index 8a59913be81..9b348193a4b 100644 --- a/vendor/github.com/redis/go-redis/v9/error.go +++ b/vendor/github.com/redis/go-redis/v9/error.go @@ -7,6 +7,7 @@ import ( "net" "strings" + "github.com/redis/go-redis/v9/internal" "github.com/redis/go-redis/v9/internal/pool" "github.com/redis/go-redis/v9/internal/proto" ) @@ -129,7 +130,9 @@ func isMovedError(err error) (moved bool, ask bool, addr string) { if ind == -1 { return false, false, "" } + addr = s[ind+1:] + addr = internal.GetAddr(addr) return } diff --git a/vendor/github.com/redis/go-redis/v9/hash_commands.go b/vendor/github.com/redis/go-redis/v9/hash_commands.go index 2c62a75ad9c..dcffdcdd98e 100644 --- a/vendor/github.com/redis/go-redis/v9/hash_commands.go +++ b/vendor/github.com/redis/go-redis/v9/hash_commands.go @@ -1,6 +1,9 @@ package redis -import "context" +import ( + "context" + "time" +) type HashCmdable interface { HDel(ctx context.Context, key string, fields ...string) *IntCmd @@ -16,9 +19,23 @@ type HashCmdable interface { HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd + HScanNoValues(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd HVals(ctx context.Context, key string) *StringSliceCmd HRandField(ctx context.Context, key string, count int) *StringSliceCmd HRandFieldWithValues(ctx context.Context, key string, count int) *KeyValueSliceCmd + HExpire(ctx context.Context, key string, expiration time.Duration, fields ...string) *IntSliceCmd + 
HExpireWithArgs(ctx context.Context, key string, expiration time.Duration, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd + HPExpire(ctx context.Context, key string, expiration time.Duration, fields ...string) *IntSliceCmd + HPExpireWithArgs(ctx context.Context, key string, expiration time.Duration, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd + HExpireAt(ctx context.Context, key string, tm time.Time, fields ...string) *IntSliceCmd + HExpireAtWithArgs(ctx context.Context, key string, tm time.Time, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd + HPExpireAt(ctx context.Context, key string, tm time.Time, fields ...string) *IntSliceCmd + HPExpireAtWithArgs(ctx context.Context, key string, tm time.Time, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd + HPersist(ctx context.Context, key string, fields ...string) *IntSliceCmd + HExpireTime(ctx context.Context, key string, fields ...string) *IntSliceCmd + HPExpireTime(ctx context.Context, key string, fields ...string) *IntSliceCmd + HTTL(ctx context.Context, key string, fields ...string) *IntSliceCmd + HPTTL(ctx context.Context, key string, fields ...string) *IntSliceCmd } func (c cmdable) HDel(ctx context.Context, key string, fields ...string) *IntCmd { @@ -172,3 +189,262 @@ func (c cmdable) HScan(ctx context.Context, key string, cursor uint64, match str _ = c(ctx, cmd) return cmd } + +func (c cmdable) HScanNoValues(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"hscan", key, cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + args = append(args, "novalues") + cmd := NewScanCmd(ctx, c, args...) + _ = c(ctx, cmd) + return cmd +} + +type HExpireArgs struct { + NX bool + XX bool + GT bool + LT bool +} + +// HExpire - Sets the expiration time for specified fields in a hash in seconds. +// The command constructs an argument list starting with "HEXPIRE", followed by the key, duration, any conditional flags, and the specified fields. +// For more information - https://redis.io/commands/hexpire/ +func (c cmdable) HExpire(ctx context.Context, key string, expiration time.Duration, fields ...string) *IntSliceCmd { + args := []interface{}{"HEXPIRE", key, formatSec(ctx, expiration), "FIELDS", len(fields)} + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// HExpire - Sets the expiration time for specified fields in a hash in seconds. +// It requires a key, an expiration duration, a struct with boolean flags for conditional expiration settings (NX, XX, GT, LT), and a list of fields. +// The command constructs an argument list starting with "HEXPIRE", followed by the key, duration, any conditional flags, and the specified fields. 
+// For more information - https://redis.io/commands/hexpire/ +func (c cmdable) HExpireWithArgs(ctx context.Context, key string, expiration time.Duration, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd { + args := []interface{}{"HEXPIRE", key, formatSec(ctx, expiration)} + + // only if one argument is true, we can add it to the args + // if more than one argument is true, it will cause an error + if expirationArgs.NX { + args = append(args, "NX") + } else if expirationArgs.XX { + args = append(args, "XX") + } else if expirationArgs.GT { + args = append(args, "GT") + } else if expirationArgs.LT { + args = append(args, "LT") + } + + args = append(args, "FIELDS", len(fields)) + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// HPExpire - Sets the expiration time for specified fields in a hash in milliseconds. +// Similar to HExpire, it accepts a key, an expiration duration in milliseconds, a struct with expiration condition flags, and a list of fields. +// The command modifies the standard time.Duration to milliseconds for the Redis command. +// For more information - https://redis.io/commands/hpexpire/ +func (c cmdable) HPExpire(ctx context.Context, key string, expiration time.Duration, fields ...string) *IntSliceCmd { + args := []interface{}{"HPEXPIRE", key, formatMs(ctx, expiration), "FIELDS", len(fields)} + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HPExpireWithArgs(ctx context.Context, key string, expiration time.Duration, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd { + args := []interface{}{"HPEXPIRE", key, formatMs(ctx, expiration)} + + // only if one argument is true, we can add it to the args + // if more than one argument is true, it will cause an error + if expirationArgs.NX { + args = append(args, "NX") + } else if expirationArgs.XX { + args = append(args, "XX") + } else if expirationArgs.GT { + args = append(args, "GT") + } else if expirationArgs.LT { + args = append(args, "LT") + } + + args = append(args, "FIELDS", len(fields)) + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// HExpireAt - Sets the expiration time for specified fields in a hash to a UNIX timestamp in seconds. +// Takes a key, a UNIX timestamp, a struct of conditional flags, and a list of fields. +// The command sets absolute expiration times based on the UNIX timestamp provided. +// For more information - https://redis.io/commands/hexpireat/ +func (c cmdable) HExpireAt(ctx context.Context, key string, tm time.Time, fields ...string) *IntSliceCmd { + + args := []interface{}{"HEXPIREAT", key, tm.Unix(), "FIELDS", len(fields)} + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HExpireAtWithArgs(ctx context.Context, key string, tm time.Time, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd { + args := []interface{}{"HEXPIREAT", key, tm.Unix()} + + // only if one argument is true, we can add it to the args + // if more than one argument is true, it will cause an error + if expirationArgs.NX { + args = append(args, "NX") + } else if expirationArgs.XX { + args = append(args, "XX") + } else if expirationArgs.GT { + args = append(args, "GT") + } else if expirationArgs.LT { + args = append(args, "LT") + } + + args = append(args, "FIELDS", len(fields)) + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// HPExpireAt - Sets the expiration time for specified fields in a hash to a UNIX timestamp in milliseconds. +// Similar to HExpireAt but for timestamps in milliseconds. It accepts the same parameters and adjusts the UNIX time to milliseconds. +// For more information - https://redis.io/commands/hpexpireat/ +func (c cmdable) HPExpireAt(ctx context.Context, key string, tm time.Time, fields ...string) *IntSliceCmd { + args := []interface{}{"HPEXPIREAT", key, tm.UnixNano() / int64(time.Millisecond), "FIELDS", len(fields)} + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HPExpireAtWithArgs(ctx context.Context, key string, tm time.Time, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd { + args := []interface{}{"HPEXPIREAT", key, tm.UnixNano() / int64(time.Millisecond)} + + // only if one argument is true, we can add it to the args + // if more than one argument is true, it will cause an error + if expirationArgs.NX { + args = append(args, "NX") + } else if expirationArgs.XX { + args = append(args, "XX") + } else if expirationArgs.GT { + args = append(args, "GT") + } else if expirationArgs.LT { + args = append(args, "LT") + } + + args = append(args, "FIELDS", len(fields)) + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// HPersist - Removes the expiration time from specified fields in a hash. +// Accepts a key and the fields themselves. +// This command ensures that each field specified will have its expiration removed if present. +// For more information - https://redis.io/commands/hpersist/ +func (c cmdable) HPersist(ctx context.Context, key string, fields ...string) *IntSliceCmd { + args := []interface{}{"HPERSIST", key, "FIELDS", len(fields)} + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// HExpireTime - Retrieves the expiration time for specified fields in a hash as a UNIX timestamp in seconds. +// Requires a key and the fields themselves to fetch their expiration timestamps. +// This command returns the expiration times for each field or error/status codes for each field as specified. +// For more information - https://redis.io/commands/hexpiretime/ +func (c cmdable) HExpireTime(ctx context.Context, key string, fields ...string) *IntSliceCmd { + args := []interface{}{"HEXPIRETIME", key, "FIELDS", len(fields)} + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +// HPExpireTime - Retrieves the expiration time for specified fields in a hash as a UNIX timestamp in milliseconds. +// Similar to HExpireTime, adjusted for timestamps in milliseconds. It requires the same parameters. +// Provides the expiration timestamp for each field in milliseconds. +// For more information - https://redis.io/commands/hexpiretime/ +func (c cmdable) HPExpireTime(ctx context.Context, key string, fields ...string) *IntSliceCmd { + args := []interface{}{"HPEXPIRETIME", key, "FIELDS", len(fields)} + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// HTTL - Retrieves the remaining time to live for specified fields in a hash in seconds. +// Requires a key and the fields themselves. It returns the TTL for each specified field. +// This command fetches the TTL in seconds for each field or returns error/status codes as appropriate. +// For more information - https://redis.io/commands/httl/ +func (c cmdable) HTTL(ctx context.Context, key string, fields ...string) *IntSliceCmd { + args := []interface{}{"HTTL", key, "FIELDS", len(fields)} + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// HPTTL - Retrieves the remaining time to live for specified fields in a hash in milliseconds. +// Similar to HTTL, but returns the TTL in milliseconds. It requires a key and the specified fields. +// This command provides the TTL in milliseconds for each field or returns error/status codes as needed. +// For more information - https://redis.io/commands/hpttl/ +func (c cmdable) HPTTL(ctx context.Context, key string, fields ...string) *IntSliceCmd { + args := []interface{}{"HPTTL", key, "FIELDS", len(fields)} + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/conn.go b/vendor/github.com/redis/go-redis/v9/internal/pool/conn.go index 7f45bc0bb79..d315c793729 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/pool/conn.go +++ b/vendor/github.com/redis/go-redis/v9/internal/pool/conn.go @@ -3,8 +3,10 @@ package pool import ( "bufio" "context" + "crypto/tls" "net" "sync/atomic" + "syscall" "time" "github.com/redis/go-redis/v9/internal/proto" @@ -16,6 +18,9 @@ type Conn struct { usedAt int64 // atomic netConn net.Conn + // for checking the health status of the connection, it may be nil. 
+ sysConn syscall.Conn + rd *proto.Reader bw *bufio.Writer wr *proto.Writer @@ -34,6 +39,7 @@ func NewConn(netConn net.Conn) *Conn { cn.bw = bufio.NewWriter(netConn) cn.wr = proto.NewWriter(cn.bw) cn.SetUsedAt(time.Now()) + cn.setSysConn() return cn } @@ -50,6 +56,22 @@ func (cn *Conn) SetNetConn(netConn net.Conn) { cn.netConn = netConn cn.rd.Reset(netConn) cn.bw.Reset(netConn) + cn.setSysConn() +} + +func (cn *Conn) setSysConn() { + cn.sysConn = nil + conn := cn.netConn + if conn == nil { + return + } + if tlsConn, ok := conn.(*tls.Conn); ok { + conn = tlsConn.NetConn() + } + + if sysConn, ok := conn.(syscall.Conn); ok { + cn.sysConn = sysConn + } } func (cn *Conn) Write(b []byte) (int, error) { diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check.go b/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check.go index 83190d39482..f28833850bf 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check.go +++ b/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check.go @@ -5,21 +5,12 @@ package pool import ( "errors" "io" - "net" "syscall" - "time" ) var errUnexpectedRead = errors.New("unexpected read from socket") -func connCheck(conn net.Conn) error { - // Reset previous timeout. - _ = conn.SetDeadline(time.Time{}) - - sysConn, ok := conn.(syscall.Conn) - if !ok { - return nil - } +func connCheck(sysConn syscall.Conn) error { rawConn, err := sysConn.SyscallConn() if err != nil { return err diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check_dummy.go b/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check_dummy.go index 295da1268e7..2d270cf56fc 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check_dummy.go +++ b/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check_dummy.go @@ -2,8 +2,8 @@ package pool -import "net" +import "syscall" -func connCheck(conn net.Conn) error { +func connCheck(_ syscall.Conn) error { return nil } diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/pool.go b/vendor/github.com/redis/go-redis/v9/internal/pool/pool.go index 2125f3e133b..9b84993cc60 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/pool/pool.go +++ b/vendor/github.com/redis/go-redis/v9/internal/pool/pool.go @@ -499,6 +499,8 @@ func (p *ConnPool) Close() error { return firstErr } +var zeroTime = time.Time{} + func (p *ConnPool) isHealthyConn(cn *Conn) bool { now := time.Now() @@ -509,8 +511,12 @@ func (p *ConnPool) isHealthyConn(cn *Conn) bool { return false } - if connCheck(cn.netConn) != nil { - return false + if cn.sysConn != nil { + // reset previous timeout. 
+ _ = cn.netConn.SetDeadline(zeroTime) + if connCheck(cn.sysConn) != nil { + return false + } } cn.SetUsedAt(now) diff --git a/vendor/github.com/redis/go-redis/v9/internal/util.go b/vendor/github.com/redis/go-redis/v9/internal/util.go index ed81ad7aa1f..235a91afa9f 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/util.go +++ b/vendor/github.com/redis/go-redis/v9/internal/util.go @@ -2,6 +2,7 @@ package internal import ( "context" + "net" "strings" "time" @@ -64,3 +65,19 @@ func ReplaceSpaces(s string) string { return builder.String() } + +func GetAddr(addr string) string { + ind := strings.LastIndexByte(addr, ':') + if ind == -1 { + return "" + } + + if strings.IndexByte(addr, '.') != -1 { + return addr + } + + if addr[0] == '[' { + return addr + } + return net.JoinHostPort(addr[:ind], addr[ind+1:]) +} diff --git a/vendor/github.com/redis/go-redis/v9/options.go b/vendor/github.com/redis/go-redis/v9/options.go index dff52ae8ba8..6ed693a0b04 100644 --- a/vendor/github.com/redis/go-redis/v9/options.go +++ b/vendor/github.com/redis/go-redis/v9/options.go @@ -61,6 +61,12 @@ type Options struct { // before reconnecting. It should return the current username and password. CredentialsProvider func() (username string, password string) + // CredentialsProviderContext is an enhanced parameter of CredentialsProvider, + // done to maintain API compatibility. In the future, + // there might be a merge between CredentialsProviderContext and CredentialsProvider. + // There will be a conflict between them; if CredentialsProviderContext exists, we will ignore CredentialsProvider. + CredentialsProviderContext func(ctx context.Context) (username string, password string, err error) + // Database to be selected after connecting to the server. DB int @@ -235,7 +241,7 @@ func NewDialer(opt *Options) func(context.Context, string, string) (net.Conn, er } } -// ParseURL parses an URL into Options that can be used to connect to Redis. +// ParseURL parses a URL into Options that can be used to connect to Redis. // Scheme is required. // There are two connection types: by tcp socket and by unix socket. // Tcp connection: @@ -250,12 +256,12 @@ func NewDialer(opt *Options) func(context.Context, string, string) (net.Conn, er // - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries // - only scalar type fields are supported (bool, int, time.Duration) // - for time.Duration fields, values must be a valid input for time.ParseDuration(); -// additionally a plain integer as value (i.e. without unit) is intepreted as seconds +// additionally a plain integer as value (i.e. 
without unit) is interpreted as seconds // - to disable a duration field, use value less than or equal to 0; to use the default // value, leave the value blank or remove the parameter // - only the last value is interpreted if a parameter is given multiple times // - fields "network", "addr", "username" and "password" can only be set using other -// URL attributes (scheme, host, userinfo, resp.), query paremeters using these +// URL attributes (scheme, host, userinfo, resp.), query parameters using these // names will be treated as unknown parameters // - unknown parameter names will result in an error // diff --git a/vendor/github.com/redis/go-redis/v9/osscluster.go b/vendor/github.com/redis/go-redis/v9/osscluster.go index 17f98d9dc82..ce258ff3634 100644 --- a/vendor/github.com/redis/go-redis/v9/osscluster.go +++ b/vendor/github.com/redis/go-redis/v9/osscluster.go @@ -62,10 +62,11 @@ type ClusterOptions struct { OnConnect func(ctx context.Context, cn *Conn) error - Protocol int - Username string - Password string - CredentialsProvider func() (username string, password string) + Protocol int + Username string + Password string + CredentialsProvider func() (username string, password string) + CredentialsProviderContext func(ctx context.Context) (username string, password string, err error) MaxRetries int MinRetryBackoff time.Duration @@ -157,12 +158,12 @@ func (opt *ClusterOptions) init() { // - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries // - only scalar type fields are supported (bool, int, time.Duration) // - for time.Duration fields, values must be a valid input for time.ParseDuration(); -// additionally a plain integer as value (i.e. without unit) is intepreted as seconds +// additionally a plain integer as value (i.e. without unit) is interpreted as seconds // - to disable a duration field, use value less than or equal to 0; to use the default // value, leave the value blank or remove the parameter // - only the last value is interpreted if a parameter is given multiple times // - fields "network", "addr", "username" and "password" can only be set using other -// URL attributes (scheme, host, userinfo, resp.), query paremeters using these +// URL attributes (scheme, host, userinfo, resp.), query parameters using these // names will be treated as unknown parameters // - unknown parameter names will result in an error // @@ -272,10 +273,11 @@ func (opt *ClusterOptions) clientOptions() *Options { Dialer: opt.Dialer, OnConnect: opt.OnConnect, - Protocol: opt.Protocol, - Username: opt.Username, - Password: opt.Password, - CredentialsProvider: opt.CredentialsProvider, + Protocol: opt.Protocol, + Username: opt.Username, + Password: opt.Password, + CredentialsProvider: opt.CredentialsProvider, + CredentialsProviderContext: opt.CredentialsProviderContext, MaxRetries: opt.MaxRetries, MinRetryBackoff: opt.MinRetryBackoff, @@ -339,6 +341,8 @@ func (n *clusterNode) Close() error { return n.Client.Close() } +const maximumNodeLatency = 1 * time.Minute + func (n *clusterNode) updateLatency() { const numProbe = 10 var dur uint64 @@ -359,7 +363,7 @@ func (n *clusterNode) updateLatency() { if successes == 0 { // If none of the pings worked, set latency to some arbitrarily high value so this node gets // least priority. 
- latency = float64((1 * time.Minute) / time.Microsecond) + latency = float64((maximumNodeLatency) / time.Microsecond) } else { latency = float64(dur) / float64(successes) } @@ -733,20 +737,40 @@ func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) { return c.nodes.Random() } - var node *clusterNode + var allNodesFailing = true + var ( + closestNonFailingNode *clusterNode + closestNode *clusterNode + minLatency time.Duration + ) + + // setting the max possible duration as zerovalue for minlatency + minLatency = time.Duration(math.MaxInt64) + for _, n := range nodes { - if n.Failing() { - continue - } - if node == nil || n.Latency() < node.Latency() { - node = n + if closestNode == nil || n.Latency() < minLatency { + closestNode = n + minLatency = n.Latency() + if !n.Failing() { + closestNonFailingNode = n + allNodesFailing = false + } } } - if node != nil { - return node, nil + + // pick the healthly node with the lowest latency + if !allNodesFailing && closestNonFailingNode != nil { + return closestNonFailingNode, nil + } + + // if all nodes are failing, we will pick the temporarily failing node with lowest latency + if minLatency < maximumNodeLatency && closestNode != nil { + internal.Logger.Printf(context.TODO(), "redis: all nodes are marked as failed, picking the temporarily failing node with lowest latency") + return closestNode, nil } - // If all nodes are failing - return random node + // If all nodes are having the maximum latency(all pings are failing) - return a random node across the cluster + internal.Logger.Printf(context.TODO(), "redis: pings to all nodes are failing, picking a random node across the cluster") return c.nodes.Random() } @@ -914,10 +938,13 @@ func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error { func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error { slot := c.cmdSlot(ctx, cmd) var node *clusterNode + var moved bool var ask bool var lastErr error for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { - if attempt > 0 { + // MOVED and ASK responses are not transient errors that require retry delay; they + // should be attempted immediately. 
+ if attempt > 0 && !moved && !ask { if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { return err } @@ -961,7 +988,6 @@ func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error { continue } - var moved bool var addr string moved, ask, addr = isMovedError(lastErr) if moved || ask { @@ -1295,6 +1321,7 @@ func (c *ClusterClient) processPipelineNode( _ = node.Client.withProcessPipelineHook(ctx, cmds, func(ctx context.Context, cmds []Cmder) error { cn, err := node.Client.getConn(ctx) if err != nil { + node.MarkAsFailing() _ = c.mapCmdsByNode(ctx, failedCmds, cmds) setCmdsErr(cmds, err) return err @@ -1316,6 +1343,9 @@ func (c *ClusterClient) processPipelineNodeConn( if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error { return writeCmds(wr, cmds) }); err != nil { + if isBadConn(err, false, node.Client.getAddr()) { + node.MarkAsFailing() + } if shouldRetry(err, true) { _ = c.mapCmdsByNode(ctx, failedCmds, cmds) } @@ -1347,7 +1377,7 @@ func (c *ClusterClient) pipelineReadCmds( continue } - if c.opt.ReadOnly { + if c.opt.ReadOnly && isBadConn(err, false, node.Client.getAddr()) { node.MarkAsFailing() } diff --git a/vendor/github.com/redis/go-redis/v9/pubsub.go b/vendor/github.com/redis/go-redis/v9/pubsub.go index 5df537c422c..72b18f49a73 100644 --- a/vendor/github.com/redis/go-redis/v9/pubsub.go +++ b/vendor/github.com/redis/go-redis/v9/pubsub.go @@ -84,7 +84,7 @@ func (c *PubSub) conn(ctx context.Context, newChannels []string) (*pool.Conn, er } func (c *PubSub) writeCmd(ctx context.Context, cn *pool.Conn, cmd Cmder) error { - return cn.WithWriter(context.Background(), c.opt.WriteTimeout, func(wr *proto.Writer) error { + return cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error { return writeCmd(wr, cmd) }) } @@ -491,7 +491,7 @@ func (c *PubSub) getContext() context.Context { // Receive* APIs can not be used after channel is created. // // go-redis periodically sends ping messages to test connection health -// and re-subscribes if ping can not not received for 1 minute. +// and re-subscribes if ping can not received for 1 minute. func (c *PubSub) Channel(opts ...ChannelOption) <-chan *Message { c.chOnce.Do(func() { c.msgCh = newChannel(c, opts...) diff --git a/vendor/github.com/redis/go-redis/v9/redis.go b/vendor/github.com/redis/go-redis/v9/redis.go index d25a0d31424..527afb677fb 100644 --- a/vendor/github.com/redis/go-redis/v9/redis.go +++ b/vendor/github.com/redis/go-redis/v9/redis.go @@ -283,8 +283,13 @@ func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error { } cn.Inited = true + var err error username, password := c.opt.Username, c.opt.Password - if c.opt.CredentialsProvider != nil { + if c.opt.CredentialsProviderContext != nil { + if username, password, err = c.opt.CredentialsProviderContext(ctx); err != nil { + return err + } + } else if c.opt.CredentialsProvider != nil { username, password = c.opt.CredentialsProvider() } @@ -300,7 +305,7 @@ func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error { // for redis-server versions that do not support the HELLO command, // RESP2 will continue to be used. 
- if err := conn.Hello(ctx, protocol, username, password, "").Err(); err == nil { + if err = conn.Hello(ctx, protocol, username, password, "").Err(); err == nil { auth = true } else if !isRedisError(err) { // When the server responds with the RESP protocol and the result is not a normal @@ -313,7 +318,7 @@ func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error { return err } - _, err := conn.Pipelined(ctx, func(pipe Pipeliner) error { + _, err = conn.Pipelined(ctx, func(pipe Pipeliner) error { if !auth && password != "" { if username != "" { pipe.AuthACL(ctx, username, password) diff --git a/vendor/github.com/redis/go-redis/v9/stream_commands.go b/vendor/github.com/redis/go-redis/v9/stream_commands.go index 0a9869202a8..6d7b2292249 100644 --- a/vendor/github.com/redis/go-redis/v9/stream_commands.go +++ b/vendor/github.com/redis/go-redis/v9/stream_commands.go @@ -137,10 +137,11 @@ type XReadArgs struct { Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2 Count int64 Block time.Duration + ID string } func (c cmdable) XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd { - args := make([]interface{}, 0, 6+len(a.Streams)) + args := make([]interface{}, 0, 2*len(a.Streams)+6) args = append(args, "xread") keyPos := int8(1) @@ -159,6 +160,11 @@ func (c cmdable) XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd { for _, s := range a.Streams { args = append(args, s) } + if a.ID != "" { + for range a.Streams { + args = append(args, a.ID) + } + } cmd := NewXStreamSliceCmd(ctx, args...) if a.Block >= 0 { @@ -178,36 +184,42 @@ func (c cmdable) XReadStreams(ctx context.Context, streams ...string) *XStreamSl func (c cmdable) XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd { cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start) + cmd.SetFirstKeyPos(2) _ = c(ctx, cmd) return cmd } func (c cmdable) XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd { cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start, "mkstream") + cmd.SetFirstKeyPos(2) _ = c(ctx, cmd) return cmd } func (c cmdable) XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd { cmd := NewStatusCmd(ctx, "xgroup", "setid", stream, group, start) + cmd.SetFirstKeyPos(2) _ = c(ctx, cmd) return cmd } func (c cmdable) XGroupDestroy(ctx context.Context, stream, group string) *IntCmd { cmd := NewIntCmd(ctx, "xgroup", "destroy", stream, group) + cmd.SetFirstKeyPos(2) _ = c(ctx, cmd) return cmd } func (c cmdable) XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd { cmd := NewIntCmd(ctx, "xgroup", "createconsumer", stream, group, consumer) + cmd.SetFirstKeyPos(2) _ = c(ctx, cmd) return cmd } func (c cmdable) XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd { cmd := NewIntCmd(ctx, "xgroup", "delconsumer", stream, group, consumer) + cmd.SetFirstKeyPos(2) _ = c(ctx, cmd) return cmd } diff --git a/vendor/github.com/redis/go-redis/v9/version.go b/vendor/github.com/redis/go-redis/v9/version.go index e2c7f3e718d..b1234dac3af 100644 --- a/vendor/github.com/redis/go-redis/v9/version.go +++ b/vendor/github.com/redis/go-redis/v9/version.go @@ -2,5 +2,5 @@ package redis // Version is the current release version. 
func Version() string { - return "9.5.1" + return "9.6.1" } diff --git a/vendor/github.com/sergi/go-diff/AUTHORS b/vendor/github.com/sergi/go-diff/AUTHORS new file mode 100644 index 00000000000..2d7bb2bf572 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/AUTHORS @@ -0,0 +1,25 @@ +# This is the official list of go-diff authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Danny Yoo +James Kolb +Jonathan Amsterdam +Markus Zimmermann +Matt Kovars +Örjan Persson +Osman Masood +Robert Carlsen +Rory Flynn +Sergi Mansilla +Shatrugna Sadhu +Shawn Smith +Stas Maksimov +Tor Arvid Lund +Zac Bergquist diff --git a/vendor/github.com/sergi/go-diff/CONTRIBUTORS b/vendor/github.com/sergi/go-diff/CONTRIBUTORS new file mode 100644 index 00000000000..369e3d55190 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/CONTRIBUTORS @@ -0,0 +1,32 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the go-diff +# repository. +# +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, ACME Inc. employees would be listed here +# but not in AUTHORS, because ACME Inc. would hold the copyright. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file. +# +# Names should be added to this file like so: +# Name +# +# Please keep the list sorted. + +Danny Yoo +James Kolb +Jonathan Amsterdam +Markus Zimmermann +Matt Kovars +Örjan Persson +Osman Masood +Robert Carlsen +Rory Flynn +Sergi Mansilla +Shatrugna Sadhu +Shawn Smith +Stas Maksimov +Tor Arvid Lund +Zac Bergquist diff --git a/vendor/github.com/sergi/go-diff/LICENSE b/vendor/github.com/sergi/go-diff/LICENSE new file mode 100644 index 00000000000..937942c2b2c --- /dev/null +++ b/vendor/github.com/sergi/go-diff/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2012-2016 The go-diff Authors. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go new file mode 100644 index 00000000000..2a9f2dc3b94 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go @@ -0,0 +1,1352 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. 
+// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "bytes" + "errors" + "fmt" + "html" + "math" + "net/url" + "regexp" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// Operation defines the operation of a diff item. +type Operation int8 + +//go:generate stringer -type=Operation -trimprefix=Diff + +const ( + // DiffDelete item represents a delete diff. + DiffDelete Operation = -1 + // DiffInsert item represents an insert diff. + DiffInsert Operation = 1 + // DiffEqual item represents an equal diff. + DiffEqual Operation = 0 + //IndexSeparator is used to seperate the array indexes in an index string + IndexSeparator = "," +) + +// Diff represents one diff operation +type Diff struct { + Type Operation + Text string +} + +// splice removes amount elements from slice at index index, replacing them with elements. +func splice(slice []Diff, index int, amount int, elements ...Diff) []Diff { + if len(elements) == amount { + // Easy case: overwrite the relevant items. + copy(slice[index:], elements) + return slice + } + if len(elements) < amount { + // Fewer new items than old. + // Copy in the new items. + copy(slice[index:], elements) + // Shift the remaining items left. + copy(slice[index+len(elements):], slice[index+amount:]) + // Calculate the new end of the slice. + end := len(slice) - amount + len(elements) + // Zero stranded elements at end so that they can be garbage collected. + tail := slice[end:] + for i := range tail { + tail[i] = Diff{} + } + return slice[:end] + } + // More new items than old. + // Make room in slice for new elements. + // There's probably an even more efficient way to do this, + // but this is simple and clear. + need := len(slice) - amount + len(elements) + for len(slice) < need { + slice = append(slice, Diff{}) + } + // Shift slice elements right to make room for new elements. + copy(slice[index+len(elements):], slice[index+amount:]) + // Copy in new elements. + copy(slice[index:], elements) + return slice +} + +// DiffMain finds the differences between two texts. +// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. +func (dmp *DiffMatchPatch) DiffMain(text1, text2 string, checklines bool) []Diff { + return dmp.DiffMainRunes([]rune(text1), []rune(text2), checklines) +} + +// DiffMainRunes finds the differences between two rune sequences. +// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. +func (dmp *DiffMatchPatch) DiffMainRunes(text1, text2 []rune, checklines bool) []Diff { + var deadline time.Time + if dmp.DiffTimeout > 0 { + deadline = time.Now().Add(dmp.DiffTimeout) + } + return dmp.diffMainRunes(text1, text2, checklines, deadline) +} + +func (dmp *DiffMatchPatch) diffMainRunes(text1, text2 []rune, checklines bool, deadline time.Time) []Diff { + if runesEqual(text1, text2) { + var diffs []Diff + if len(text1) > 0 { + diffs = append(diffs, Diff{DiffEqual, string(text1)}) + } + return diffs + } + // Trim off common prefix (speedup). + commonlength := commonPrefixLength(text1, text2) + commonprefix := text1[:commonlength] + text1 = text1[commonlength:] + text2 = text2[commonlength:] + + // Trim off common suffix (speedup). 
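A minimal usage sketch for DiffMain above (New and DiffPrettyText appear later in this package; assumes the standard fmt import), illustrative only:

	// Sketch only: diff two strings and print a colored report.
	dmp := diffmatchpatch.New()
	diffs := dmp.DiffMain("the quick brown fox", "the quick red fox", false)
	fmt.Println(dmp.DiffPrettyText(diffs)) // deletions in red, insertions in green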
+ commonlength = commonSuffixLength(text1, text2) + commonsuffix := text1[len(text1)-commonlength:] + text1 = text1[:len(text1)-commonlength] + text2 = text2[:len(text2)-commonlength] + + // Compute the diff on the middle block. + diffs := dmp.diffCompute(text1, text2, checklines, deadline) + + // Restore the prefix and suffix. + if len(commonprefix) != 0 { + diffs = append([]Diff{{DiffEqual, string(commonprefix)}}, diffs...) + } + if len(commonsuffix) != 0 { + diffs = append(diffs, Diff{DiffEqual, string(commonsuffix)}) + } + + return dmp.DiffCleanupMerge(diffs) +} + +// diffCompute finds the differences between two rune slices. Assumes that the texts do not have any common prefix or suffix. +func (dmp *DiffMatchPatch) diffCompute(text1, text2 []rune, checklines bool, deadline time.Time) []Diff { + diffs := []Diff{} + if len(text1) == 0 { + // Just add some text (speedup). + return append(diffs, Diff{DiffInsert, string(text2)}) + } else if len(text2) == 0 { + // Just delete some text (speedup). + return append(diffs, Diff{DiffDelete, string(text1)}) + } + + var longtext, shorttext []rune + if len(text1) > len(text2) { + longtext = text1 + shorttext = text2 + } else { + longtext = text2 + shorttext = text1 + } + + if i := runesIndex(longtext, shorttext); i != -1 { + op := DiffInsert + // Swap insertions for deletions if diff is reversed. + if len(text1) > len(text2) { + op = DiffDelete + } + // Shorter text is inside the longer text (speedup). + return []Diff{ + Diff{op, string(longtext[:i])}, + Diff{DiffEqual, string(shorttext)}, + Diff{op, string(longtext[i+len(shorttext):])}, + } + } else if len(shorttext) == 1 { + // Single character string. + // After the previous speedup, the character can't be an equality. + return []Diff{ + {DiffDelete, string(text1)}, + {DiffInsert, string(text2)}, + } + // Check to see if the problem can be split in two. + } else if hm := dmp.diffHalfMatch(text1, text2); hm != nil { + // A half-match was found, sort out the return data. + text1A := hm[0] + text1B := hm[1] + text2A := hm[2] + text2B := hm[3] + midCommon := hm[4] + // Send both pairs off for separate processing. + diffsA := dmp.diffMainRunes(text1A, text2A, checklines, deadline) + diffsB := dmp.diffMainRunes(text1B, text2B, checklines, deadline) + // Merge the results. + diffs := diffsA + diffs = append(diffs, Diff{DiffEqual, string(midCommon)}) + diffs = append(diffs, diffsB...) + return diffs + } else if checklines && len(text1) > 100 && len(text2) > 100 { + return dmp.diffLineMode(text1, text2, deadline) + } + return dmp.diffBisect(text1, text2, deadline) +} + +// diffLineMode does a quick line-level diff on both []runes, then rediff the parts for greater accuracy. This speedup can produce non-minimal diffs. +func (dmp *DiffMatchPatch) diffLineMode(text1, text2 []rune, deadline time.Time) []Diff { + // Scan the text on a line-by-line basis first. + text1, text2, linearray := dmp.DiffLinesToRunes(string(text1), string(text2)) + + diffs := dmp.diffMainRunes(text1, text2, false, deadline) + + // Convert the diff back to original text. + diffs = dmp.DiffCharsToLines(diffs, linearray) + // Eliminate freak matches (e.g. blank lines) + diffs = dmp.DiffCleanupSemantic(diffs) + + // Rediff any replacement blocks, this time character-by-character. + // Add a dummy entry at the end. + diffs = append(diffs, Diff{DiffEqual, ""}) + + pointer := 0 + countDelete := 0 + countInsert := 0 + + // NOTE: Rune slices are slower than using strings in this case. 
+ textDelete := "" + textInsert := "" + + for pointer < len(diffs) { + switch diffs[pointer].Type { + case DiffInsert: + countInsert++ + textInsert += diffs[pointer].Text + case DiffDelete: + countDelete++ + textDelete += diffs[pointer].Text + case DiffEqual: + // Upon reaching an equality, check for prior redundancies. + if countDelete >= 1 && countInsert >= 1 { + // Delete the offending records and add the merged ones. + diffs = splice(diffs, pointer-countDelete-countInsert, + countDelete+countInsert) + + pointer = pointer - countDelete - countInsert + a := dmp.diffMainRunes([]rune(textDelete), []rune(textInsert), false, deadline) + for j := len(a) - 1; j >= 0; j-- { + diffs = splice(diffs, pointer, 0, a[j]) + } + pointer = pointer + len(a) + } + + countInsert = 0 + countDelete = 0 + textDelete = "" + textInsert = "" + } + pointer++ + } + + return diffs[:len(diffs)-1] // Remove the dummy entry at the end. +} + +// DiffBisect finds the 'middle snake' of a diff, split the problem in two and return the recursively constructed diff. +// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. +// See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations. +func (dmp *DiffMatchPatch) DiffBisect(text1, text2 string, deadline time.Time) []Diff { + // Unused in this code, but retained for interface compatibility. + return dmp.diffBisect([]rune(text1), []rune(text2), deadline) +} + +// diffBisect finds the 'middle snake' of a diff, splits the problem in two and returns the recursively constructed diff. +// See Myers's 1986 paper: An O(ND) Difference Algorithm and Its Variations. +func (dmp *DiffMatchPatch) diffBisect(runes1, runes2 []rune, deadline time.Time) []Diff { + // Cache the text lengths to prevent multiple calls. + runes1Len, runes2Len := len(runes1), len(runes2) + + maxD := (runes1Len + runes2Len + 1) / 2 + vOffset := maxD + vLength := 2 * maxD + + v1 := make([]int, vLength) + v2 := make([]int, vLength) + for i := range v1 { + v1[i] = -1 + v2[i] = -1 + } + v1[vOffset+1] = 0 + v2[vOffset+1] = 0 + + delta := runes1Len - runes2Len + // If the total number of characters is odd, then the front path will collide with the reverse path. + front := (delta%2 != 0) + // Offsets for start and end of k loop. Prevents mapping of space beyond the grid. + k1start := 0 + k1end := 0 + k2start := 0 + k2end := 0 + for d := 0; d < maxD; d++ { + // Bail out if deadline is reached. + if !deadline.IsZero() && d%16 == 0 && time.Now().After(deadline) { + break + } + + // Walk the front path one step. + for k1 := -d + k1start; k1 <= d-k1end; k1 += 2 { + k1Offset := vOffset + k1 + var x1 int + + if k1 == -d || (k1 != d && v1[k1Offset-1] < v1[k1Offset+1]) { + x1 = v1[k1Offset+1] + } else { + x1 = v1[k1Offset-1] + 1 + } + + y1 := x1 - k1 + for x1 < runes1Len && y1 < runes2Len { + if runes1[x1] != runes2[y1] { + break + } + x1++ + y1++ + } + v1[k1Offset] = x1 + if x1 > runes1Len { + // Ran off the right of the graph. + k1end += 2 + } else if y1 > runes2Len { + // Ran off the bottom of the graph. + k1start += 2 + } else if front { + k2Offset := vOffset + delta - k1 + if k2Offset >= 0 && k2Offset < vLength && v2[k2Offset] != -1 { + // Mirror x2 onto top-left coordinate system. + x2 := runes1Len - v2[k2Offset] + if x1 >= x2 { + // Overlap detected. + return dmp.diffBisectSplit(runes1, runes2, x1, y1, deadline) + } + } + } + } + // Walk the reverse path one step. 
+ for k2 := -d + k2start; k2 <= d-k2end; k2 += 2 { + k2Offset := vOffset + k2 + var x2 int + if k2 == -d || (k2 != d && v2[k2Offset-1] < v2[k2Offset+1]) { + x2 = v2[k2Offset+1] + } else { + x2 = v2[k2Offset-1] + 1 + } + var y2 = x2 - k2 + for x2 < runes1Len && y2 < runes2Len { + if runes1[runes1Len-x2-1] != runes2[runes2Len-y2-1] { + break + } + x2++ + y2++ + } + v2[k2Offset] = x2 + if x2 > runes1Len { + // Ran off the left of the graph. + k2end += 2 + } else if y2 > runes2Len { + // Ran off the top of the graph. + k2start += 2 + } else if !front { + k1Offset := vOffset + delta - k2 + if k1Offset >= 0 && k1Offset < vLength && v1[k1Offset] != -1 { + x1 := v1[k1Offset] + y1 := vOffset + x1 - k1Offset + // Mirror x2 onto top-left coordinate system. + x2 = runes1Len - x2 + if x1 >= x2 { + // Overlap detected. + return dmp.diffBisectSplit(runes1, runes2, x1, y1, deadline) + } + } + } + } + } + // Diff took too long and hit the deadline or number of diffs equals number of characters, no commonality at all. + return []Diff{ + {DiffDelete, string(runes1)}, + {DiffInsert, string(runes2)}, + } +} + +func (dmp *DiffMatchPatch) diffBisectSplit(runes1, runes2 []rune, x, y int, + deadline time.Time) []Diff { + runes1a := runes1[:x] + runes2a := runes2[:y] + runes1b := runes1[x:] + runes2b := runes2[y:] + + // Compute both diffs serially. + diffs := dmp.diffMainRunes(runes1a, runes2a, false, deadline) + diffsb := dmp.diffMainRunes(runes1b, runes2b, false, deadline) + + return append(diffs, diffsb...) +} + +// DiffLinesToChars splits two texts into a list of strings, and educes the texts to a string of hashes where each Unicode character represents one line. +// It's slightly faster to call DiffLinesToRunes first, followed by DiffMainRunes. +func (dmp *DiffMatchPatch) DiffLinesToChars(text1, text2 string) (string, string, []string) { + chars1, chars2, lineArray := dmp.diffLinesToStrings(text1, text2) + return chars1, chars2, lineArray +} + +// DiffLinesToRunes splits two texts into a list of runes. +func (dmp *DiffMatchPatch) DiffLinesToRunes(text1, text2 string) ([]rune, []rune, []string) { + chars1, chars2, lineArray := dmp.diffLinesToStrings(text1, text2) + return []rune(chars1), []rune(chars2), lineArray +} + +// DiffCharsToLines rehydrates the text in a diff from a string of line hashes to real lines of text. +func (dmp *DiffMatchPatch) DiffCharsToLines(diffs []Diff, lineArray []string) []Diff { + hydrated := make([]Diff, 0, len(diffs)) + for _, aDiff := range diffs { + chars := strings.Split(aDiff.Text, IndexSeparator) + text := make([]string, len(chars)) + + for i, r := range chars { + i1, err := strconv.Atoi(r) + if err == nil { + text[i] = lineArray[i1] + } + } + + aDiff.Text = strings.Join(text, "") + hydrated = append(hydrated, aDiff) + } + return hydrated +} + +// DiffCommonPrefix determines the common prefix length of two strings. +func (dmp *DiffMatchPatch) DiffCommonPrefix(text1, text2 string) int { + // Unused in this code, but retained for interface compatibility. + return commonPrefixLength([]rune(text1), []rune(text2)) +} + +// DiffCommonSuffix determines the common suffix length of two strings. +func (dmp *DiffMatchPatch) DiffCommonSuffix(text1, text2 string) int { + // Unused in this code, but retained for interface compatibility. + return commonSuffixLength([]rune(text1), []rune(text2)) +} + +// commonPrefixLength returns the length of the common prefix of two rune slices. +func commonPrefixLength(text1, text2 []rune) int { + // Linear search. See comment in commonSuffixLength. 
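The deadline checks above are driven by DiffTimeout (one second by default, see diffmatchpatch.go later in this patch); when the deadline is hit, diffBisect falls back to a coarse but still valid delete-plus-insert result. A minimal sketch, with oldText and newText as placeholders:

	// Sketch only: bound worst-case diff time on large inputs.
	dmp := diffmatchpatch.New()
	dmp.DiffTimeout = 100 * time.Millisecond
	diffs := dmp.DiffMain(oldText, newText, true) // checklines=true speeds up large texts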
+ n := 0 + for ; n < len(text1) && n < len(text2); n++ { + if text1[n] != text2[n] { + return n + } + } + return n +} + +// commonSuffixLength returns the length of the common suffix of two rune slices. +func commonSuffixLength(text1, text2 []rune) int { + // Use linear search rather than the binary search discussed at https://neil.fraser.name/news/2007/10/09/. + // See discussion at https://github.com/sergi/go-diff/issues/54. + i1 := len(text1) + i2 := len(text2) + for n := 0; ; n++ { + i1-- + i2-- + if i1 < 0 || i2 < 0 || text1[i1] != text2[i2] { + return n + } + } +} + +// DiffCommonOverlap determines if the suffix of one string is the prefix of another. +func (dmp *DiffMatchPatch) DiffCommonOverlap(text1 string, text2 string) int { + // Cache the text lengths to prevent multiple calls. + text1Length := len(text1) + text2Length := len(text2) + // Eliminate the null case. + if text1Length == 0 || text2Length == 0 { + return 0 + } + // Truncate the longer string. + if text1Length > text2Length { + text1 = text1[text1Length-text2Length:] + } else if text1Length < text2Length { + text2 = text2[0:text1Length] + } + textLength := int(math.Min(float64(text1Length), float64(text2Length))) + // Quick check for the worst case. + if text1 == text2 { + return textLength + } + + // Start by looking for a single character match and increase length until no match is found. Performance analysis: http://neil.fraser.name/news/2010/11/04/ + best := 0 + length := 1 + for { + pattern := text1[textLength-length:] + found := strings.Index(text2, pattern) + if found == -1 { + break + } + length += found + if found == 0 || text1[textLength-length:] == text2[0:length] { + best = length + length++ + } + } + + return best +} + +// DiffHalfMatch checks whether the two texts share a substring which is at least half the length of the longer text. This speedup can produce non-minimal diffs. +func (dmp *DiffMatchPatch) DiffHalfMatch(text1, text2 string) []string { + // Unused in this code, but retained for interface compatibility. + runeSlices := dmp.diffHalfMatch([]rune(text1), []rune(text2)) + if runeSlices == nil { + return nil + } + + result := make([]string, len(runeSlices)) + for i, r := range runeSlices { + result[i] = string(r) + } + return result +} + +func (dmp *DiffMatchPatch) diffHalfMatch(text1, text2 []rune) [][]rune { + if dmp.DiffTimeout <= 0 { + // Don't risk returning a non-optimal diff if we have unlimited time. + return nil + } + + var longtext, shorttext []rune + if len(text1) > len(text2) { + longtext = text1 + shorttext = text2 + } else { + longtext = text2 + shorttext = text1 + } + + if len(longtext) < 4 || len(shorttext)*2 < len(longtext) { + return nil // Pointless. + } + + // First check if the second quarter is the seed for a half-match. + hm1 := dmp.diffHalfMatchI(longtext, shorttext, int(float64(len(longtext)+3)/4)) + + // Check again based on the third quarter. + hm2 := dmp.diffHalfMatchI(longtext, shorttext, int(float64(len(longtext)+1)/2)) + + hm := [][]rune{} + if hm1 == nil && hm2 == nil { + return nil + } else if hm2 == nil { + hm = hm1 + } else if hm1 == nil { + hm = hm2 + } else { + // Both matched. Select the longest. + if len(hm1[4]) > len(hm2[4]) { + hm = hm1 + } else { + hm = hm2 + } + } + + // A half-match was found, sort out the return data. 
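A quick illustration of the helpers above (the values follow from the definitions shown here):

	// Sketch only: expected return values.
	dmp := diffmatchpatch.New()
	_ = dmp.DiffCommonPrefix("1234abcdef", "1234xyz") // 4 ("1234")
	_ = dmp.DiffCommonSuffix("abcdef1234", "xyz1234") // 4 ("1234")
	_ = dmp.DiffCommonOverlap("123456", "56789")      // 2 ("56" ends text1 and starts text2)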
+ if len(text1) > len(text2) { + return hm + } + + return [][]rune{hm[2], hm[3], hm[0], hm[1], hm[4]} +} + +// diffHalfMatchI checks if a substring of shorttext exist within longtext such that the substring is at least half the length of longtext? +// Returns a slice containing the prefix of longtext, the suffix of longtext, the prefix of shorttext, the suffix of shorttext and the common middle, or null if there was no match. +func (dmp *DiffMatchPatch) diffHalfMatchI(l, s []rune, i int) [][]rune { + var bestCommonA []rune + var bestCommonB []rune + var bestCommonLen int + var bestLongtextA []rune + var bestLongtextB []rune + var bestShorttextA []rune + var bestShorttextB []rune + + // Start with a 1/4 length substring at position i as a seed. + seed := l[i : i+len(l)/4] + + for j := runesIndexOf(s, seed, 0); j != -1; j = runesIndexOf(s, seed, j+1) { + prefixLength := commonPrefixLength(l[i:], s[j:]) + suffixLength := commonSuffixLength(l[:i], s[:j]) + + if bestCommonLen < suffixLength+prefixLength { + bestCommonA = s[j-suffixLength : j] + bestCommonB = s[j : j+prefixLength] + bestCommonLen = len(bestCommonA) + len(bestCommonB) + bestLongtextA = l[:i-suffixLength] + bestLongtextB = l[i+prefixLength:] + bestShorttextA = s[:j-suffixLength] + bestShorttextB = s[j+prefixLength:] + } + } + + if bestCommonLen*2 < len(l) { + return nil + } + + return [][]rune{ + bestLongtextA, + bestLongtextB, + bestShorttextA, + bestShorttextB, + append(bestCommonA, bestCommonB...), + } +} + +// DiffCleanupSemantic reduces the number of edits by eliminating semantically trivial equalities. +func (dmp *DiffMatchPatch) DiffCleanupSemantic(diffs []Diff) []Diff { + changes := false + // Stack of indices where equalities are found. + equalities := make([]int, 0, len(diffs)) + + var lastequality string + // Always equal to diffs[equalities[equalitiesLength - 1]][1] + var pointer int // Index of current position. + // Number of characters that changed prior to the equality. + var lengthInsertions1, lengthDeletions1 int + // Number of characters that changed after the equality. + var lengthInsertions2, lengthDeletions2 int + + for pointer < len(diffs) { + if diffs[pointer].Type == DiffEqual { + // Equality found. + equalities = append(equalities, pointer) + lengthInsertions1 = lengthInsertions2 + lengthDeletions1 = lengthDeletions2 + lengthInsertions2 = 0 + lengthDeletions2 = 0 + lastequality = diffs[pointer].Text + } else { + // An insertion or deletion. + + if diffs[pointer].Type == DiffInsert { + lengthInsertions2 += utf8.RuneCountInString(diffs[pointer].Text) + } else { + lengthDeletions2 += utf8.RuneCountInString(diffs[pointer].Text) + } + // Eliminate an equality that is smaller or equal to the edits on both sides of it. + difference1 := int(math.Max(float64(lengthInsertions1), float64(lengthDeletions1))) + difference2 := int(math.Max(float64(lengthInsertions2), float64(lengthDeletions2))) + if utf8.RuneCountInString(lastequality) > 0 && + (utf8.RuneCountInString(lastequality) <= difference1) && + (utf8.RuneCountInString(lastequality) <= difference2) { + // Duplicate record. + insPoint := equalities[len(equalities)-1] + diffs = splice(diffs, insPoint, 0, Diff{DiffDelete, lastequality}) + + // Change second copy to insert. + diffs[insPoint+1].Type = DiffInsert + // Throw away the equality we just deleted. 
+ equalities = equalities[:len(equalities)-1] + + if len(equalities) > 0 { + equalities = equalities[:len(equalities)-1] + } + pointer = -1 + if len(equalities) > 0 { + pointer = equalities[len(equalities)-1] + } + + lengthInsertions1 = 0 // Reset the counters. + lengthDeletions1 = 0 + lengthInsertions2 = 0 + lengthDeletions2 = 0 + lastequality = "" + changes = true + } + } + pointer++ + } + + // Normalize the diff. + if changes { + diffs = dmp.DiffCleanupMerge(diffs) + } + diffs = dmp.DiffCleanupSemanticLossless(diffs) + // Find any overlaps between deletions and insertions. + // e.g: abcxxxxxxdef + // -> abcxxxdef + // e.g: xxxabcdefxxx + // -> defxxxabc + // Only extract an overlap if it is as big as the edit ahead or behind it. + pointer = 1 + for pointer < len(diffs) { + if diffs[pointer-1].Type == DiffDelete && + diffs[pointer].Type == DiffInsert { + deletion := diffs[pointer-1].Text + insertion := diffs[pointer].Text + overlapLength1 := dmp.DiffCommonOverlap(deletion, insertion) + overlapLength2 := dmp.DiffCommonOverlap(insertion, deletion) + if overlapLength1 >= overlapLength2 { + if float64(overlapLength1) >= float64(utf8.RuneCountInString(deletion))/2 || + float64(overlapLength1) >= float64(utf8.RuneCountInString(insertion))/2 { + + // Overlap found. Insert an equality and trim the surrounding edits. + diffs = splice(diffs, pointer, 0, Diff{DiffEqual, insertion[:overlapLength1]}) + diffs[pointer-1].Text = + deletion[0 : len(deletion)-overlapLength1] + diffs[pointer+1].Text = insertion[overlapLength1:] + pointer++ + } + } else { + if float64(overlapLength2) >= float64(utf8.RuneCountInString(deletion))/2 || + float64(overlapLength2) >= float64(utf8.RuneCountInString(insertion))/2 { + // Reverse overlap found. Insert an equality and swap and trim the surrounding edits. + overlap := Diff{DiffEqual, deletion[:overlapLength2]} + diffs = splice(diffs, pointer, 0, overlap) + diffs[pointer-1].Type = DiffInsert + diffs[pointer-1].Text = insertion[0 : len(insertion)-overlapLength2] + diffs[pointer+1].Type = DiffDelete + diffs[pointer+1].Text = deletion[overlapLength2:] + pointer++ + } + } + pointer++ + } + pointer++ + } + + return diffs +} + +// Define some regex patterns for matching boundaries. +var ( + nonAlphaNumericRegex = regexp.MustCompile(`[^a-zA-Z0-9]`) + whitespaceRegex = regexp.MustCompile(`\s`) + linebreakRegex = regexp.MustCompile(`[\r\n]`) + blanklineEndRegex = regexp.MustCompile(`\n\r?\n$`) + blanklineStartRegex = regexp.MustCompile(`^\r?\n\r?\n`) +) + +// diffCleanupSemanticScore computes a score representing whether the internal boundary falls on logical boundaries. +// Scores range from 6 (best) to 0 (worst). Closure, but does not reference any external variables. +func diffCleanupSemanticScore(one, two string) int { + if len(one) == 0 || len(two) == 0 { + // Edges are the best. + return 6 + } + + // Each port of this function behaves slightly differently due to subtle differences in each language's definition of things like 'whitespace'. Since this function's purpose is largely cosmetic, the choice has been made to use each language's native features rather than force total conformity. 
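For human-facing output, DiffCleanupSemantic above is typically run on the result of DiffMain; a minimal sketch, with oldText and newText as placeholders:

	// Sketch only: post-process a raw diff for readability.
	dmp := diffmatchpatch.New()
	diffs := dmp.DiffMain(oldText, newText, false)
	diffs = dmp.DiffCleanupSemantic(diffs) // also runs the lossless word-boundary pass internally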
+ rune1, _ := utf8.DecodeLastRuneInString(one) + rune2, _ := utf8.DecodeRuneInString(two) + char1 := string(rune1) + char2 := string(rune2) + + nonAlphaNumeric1 := nonAlphaNumericRegex.MatchString(char1) + nonAlphaNumeric2 := nonAlphaNumericRegex.MatchString(char2) + whitespace1 := nonAlphaNumeric1 && whitespaceRegex.MatchString(char1) + whitespace2 := nonAlphaNumeric2 && whitespaceRegex.MatchString(char2) + lineBreak1 := whitespace1 && linebreakRegex.MatchString(char1) + lineBreak2 := whitespace2 && linebreakRegex.MatchString(char2) + blankLine1 := lineBreak1 && blanklineEndRegex.MatchString(one) + blankLine2 := lineBreak2 && blanklineEndRegex.MatchString(two) + + if blankLine1 || blankLine2 { + // Five points for blank lines. + return 5 + } else if lineBreak1 || lineBreak2 { + // Four points for line breaks. + return 4 + } else if nonAlphaNumeric1 && !whitespace1 && whitespace2 { + // Three points for end of sentences. + return 3 + } else if whitespace1 || whitespace2 { + // Two points for whitespace. + return 2 + } else if nonAlphaNumeric1 || nonAlphaNumeric2 { + // One point for non-alphanumeric. + return 1 + } + return 0 +} + +// DiffCleanupSemanticLossless looks for single edits surrounded on both sides by equalities which can be shifted sideways to align the edit to a word boundary. +// E.g: The cat came. -> The cat came. +func (dmp *DiffMatchPatch) DiffCleanupSemanticLossless(diffs []Diff) []Diff { + pointer := 1 + + // Intentionally ignore the first and last element (don't need checking). + for pointer < len(diffs)-1 { + if diffs[pointer-1].Type == DiffEqual && + diffs[pointer+1].Type == DiffEqual { + + // This is a single edit surrounded by equalities. + equality1 := diffs[pointer-1].Text + edit := diffs[pointer].Text + equality2 := diffs[pointer+1].Text + + // First, shift the edit as far left as possible. + commonOffset := dmp.DiffCommonSuffix(equality1, edit) + if commonOffset > 0 { + commonString := edit[len(edit)-commonOffset:] + equality1 = equality1[0 : len(equality1)-commonOffset] + edit = commonString + edit[:len(edit)-commonOffset] + equality2 = commonString + equality2 + } + + // Second, step character by character right, looking for the best fit. + bestEquality1 := equality1 + bestEdit := edit + bestEquality2 := equality2 + bestScore := diffCleanupSemanticScore(equality1, edit) + + diffCleanupSemanticScore(edit, equality2) + + for len(edit) != 0 && len(equality2) != 0 { + _, sz := utf8.DecodeRuneInString(edit) + if len(equality2) < sz || edit[:sz] != equality2[:sz] { + break + } + equality1 += edit[:sz] + edit = edit[sz:] + equality2[:sz] + equality2 = equality2[sz:] + score := diffCleanupSemanticScore(equality1, edit) + + diffCleanupSemanticScore(edit, equality2) + // The >= encourages trailing rather than leading whitespace on edits. + if score >= bestScore { + bestScore = score + bestEquality1 = equality1 + bestEdit = edit + bestEquality2 = equality2 + } + } + + if diffs[pointer-1].Text != bestEquality1 { + // We have an improvement, save it back to the diff. + if len(bestEquality1) != 0 { + diffs[pointer-1].Text = bestEquality1 + } else { + diffs = splice(diffs, pointer-1, 1) + pointer-- + } + + diffs[pointer].Text = bestEdit + if len(bestEquality2) != 0 { + diffs[pointer+1].Text = bestEquality2 + } else { + diffs = append(diffs[:pointer+1], diffs[pointer+2:]...) + pointer-- + } + } + } + pointer++ + } + + return diffs +} + +// DiffCleanupEfficiency reduces the number of edits by eliminating operationally trivial equalities. 
+func (dmp *DiffMatchPatch) DiffCleanupEfficiency(diffs []Diff) []Diff { + changes := false + // Stack of indices where equalities are found. + type equality struct { + data int + next *equality + } + var equalities *equality + // Always equal to equalities[equalitiesLength-1][1] + lastequality := "" + pointer := 0 // Index of current position. + // Is there an insertion operation before the last equality. + preIns := false + // Is there a deletion operation before the last equality. + preDel := false + // Is there an insertion operation after the last equality. + postIns := false + // Is there a deletion operation after the last equality. + postDel := false + for pointer < len(diffs) { + if diffs[pointer].Type == DiffEqual { // Equality found. + if len(diffs[pointer].Text) < dmp.DiffEditCost && + (postIns || postDel) { + // Candidate found. + equalities = &equality{ + data: pointer, + next: equalities, + } + preIns = postIns + preDel = postDel + lastequality = diffs[pointer].Text + } else { + // Not a candidate, and can never become one. + equalities = nil + lastequality = "" + } + postIns = false + postDel = false + } else { // An insertion or deletion. + if diffs[pointer].Type == DiffDelete { + postDel = true + } else { + postIns = true + } + + // Five types to be split: + // ABXYCD + // AXCD + // ABXC + // AXCD + // ABXC + var sumPres int + if preIns { + sumPres++ + } + if preDel { + sumPres++ + } + if postIns { + sumPres++ + } + if postDel { + sumPres++ + } + if len(lastequality) > 0 && + ((preIns && preDel && postIns && postDel) || + ((len(lastequality) < dmp.DiffEditCost/2) && sumPres == 3)) { + + insPoint := equalities.data + + // Duplicate record. + diffs = splice(diffs, insPoint, 0, Diff{DiffDelete, lastequality}) + + // Change second copy to insert. + diffs[insPoint+1].Type = DiffInsert + // Throw away the equality we just deleted. + equalities = equalities.next + lastequality = "" + + if preIns && preDel { + // No changes made which could affect previous entry, keep going. + postIns = true + postDel = true + equalities = nil + } else { + if equalities != nil { + equalities = equalities.next + } + if equalities != nil { + pointer = equalities.data + } else { + pointer = -1 + } + postIns = false + postDel = false + } + changes = true + } + } + pointer++ + } + + if changes { + diffs = dmp.DiffCleanupMerge(diffs) + } + + return diffs +} + +// DiffCleanupMerge reorders and merges like edit sections. Merge equalities. +// Any edit section can move as long as it doesn't cross an equality. +func (dmp *DiffMatchPatch) DiffCleanupMerge(diffs []Diff) []Diff { + // Add a dummy entry at the end. + diffs = append(diffs, Diff{DiffEqual, ""}) + pointer := 0 + countDelete := 0 + countInsert := 0 + commonlength := 0 + textDelete := []rune(nil) + textInsert := []rune(nil) + + for pointer < len(diffs) { + switch diffs[pointer].Type { + case DiffInsert: + countInsert++ + textInsert = append(textInsert, []rune(diffs[pointer].Text)...) + pointer++ + break + case DiffDelete: + countDelete++ + textDelete = append(textDelete, []rune(diffs[pointer].Text)...) + pointer++ + break + case DiffEqual: + // Upon reaching an equality, check for prior redundancies. + if countDelete+countInsert > 1 { + if countDelete != 0 && countInsert != 0 { + // Factor out any common prefixies. 
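DiffCleanupEfficiency above trades slightly larger edits for fewer operations, governed by DiffEditCost (4 by default, see diffmatchpatch.go later in this patch); a minimal sketch, with oldText and newText as placeholders:

	// Sketch only: favor fewer, coarser edits, e.g. before building patches.
	dmp := diffmatchpatch.New()
	dmp.DiffEditCost = 4 // raise to merge edits more aggressively
	diffs := dmp.DiffMain(oldText, newText, false)
	diffs = dmp.DiffCleanupEfficiency(diffs)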
+ commonlength = commonPrefixLength(textInsert, textDelete) + if commonlength != 0 { + x := pointer - countDelete - countInsert + if x > 0 && diffs[x-1].Type == DiffEqual { + diffs[x-1].Text += string(textInsert[:commonlength]) + } else { + diffs = append([]Diff{{DiffEqual, string(textInsert[:commonlength])}}, diffs...) + pointer++ + } + textInsert = textInsert[commonlength:] + textDelete = textDelete[commonlength:] + } + // Factor out any common suffixies. + commonlength = commonSuffixLength(textInsert, textDelete) + if commonlength != 0 { + insertIndex := len(textInsert) - commonlength + deleteIndex := len(textDelete) - commonlength + diffs[pointer].Text = string(textInsert[insertIndex:]) + diffs[pointer].Text + textInsert = textInsert[:insertIndex] + textDelete = textDelete[:deleteIndex] + } + } + // Delete the offending records and add the merged ones. + if countDelete == 0 { + diffs = splice(diffs, pointer-countInsert, + countDelete+countInsert, + Diff{DiffInsert, string(textInsert)}) + } else if countInsert == 0 { + diffs = splice(diffs, pointer-countDelete, + countDelete+countInsert, + Diff{DiffDelete, string(textDelete)}) + } else { + diffs = splice(diffs, pointer-countDelete-countInsert, + countDelete+countInsert, + Diff{DiffDelete, string(textDelete)}, + Diff{DiffInsert, string(textInsert)}) + } + + pointer = pointer - countDelete - countInsert + 1 + if countDelete != 0 { + pointer++ + } + if countInsert != 0 { + pointer++ + } + } else if pointer != 0 && diffs[pointer-1].Type == DiffEqual { + // Merge this equality with the previous one. + diffs[pointer-1].Text += diffs[pointer].Text + diffs = append(diffs[:pointer], diffs[pointer+1:]...) + } else { + pointer++ + } + countInsert = 0 + countDelete = 0 + textDelete = nil + textInsert = nil + break + } + } + + if len(diffs[len(diffs)-1].Text) == 0 { + diffs = diffs[0 : len(diffs)-1] // Remove the dummy entry at the end. + } + + // Second pass: look for single edits surrounded on both sides by equalities which can be shifted sideways to eliminate an equality. E.g: ABAC -> ABAC + changes := false + pointer = 1 + // Intentionally ignore the first and last element (don't need checking). + for pointer < (len(diffs) - 1) { + if diffs[pointer-1].Type == DiffEqual && + diffs[pointer+1].Type == DiffEqual { + // This is a single edit surrounded by equalities. + if strings.HasSuffix(diffs[pointer].Text, diffs[pointer-1].Text) { + // Shift the edit over the previous equality. + diffs[pointer].Text = diffs[pointer-1].Text + + diffs[pointer].Text[:len(diffs[pointer].Text)-len(diffs[pointer-1].Text)] + diffs[pointer+1].Text = diffs[pointer-1].Text + diffs[pointer+1].Text + diffs = splice(diffs, pointer-1, 1) + changes = true + } else if strings.HasPrefix(diffs[pointer].Text, diffs[pointer+1].Text) { + // Shift the edit over the next equality. + diffs[pointer-1].Text += diffs[pointer+1].Text + diffs[pointer].Text = + diffs[pointer].Text[len(diffs[pointer+1].Text):] + diffs[pointer+1].Text + diffs = splice(diffs, pointer+1, 1) + changes = true + } + } + pointer++ + } + + // If shifts were made, the diff needs reordering and another shift sweep. + if changes { + diffs = dmp.DiffCleanupMerge(diffs) + } + + return diffs +} + +// DiffXIndex returns the equivalent location in s2. +func (dmp *DiffMatchPatch) DiffXIndex(diffs []Diff, loc int) int { + chars1 := 0 + chars2 := 0 + lastChars1 := 0 + lastChars2 := 0 + lastDiff := Diff{} + for i := 0; i < len(diffs); i++ { + aDiff := diffs[i] + if aDiff.Type != DiffInsert { + // Equality or deletion. 
+ chars1 += len(aDiff.Text) + } + if aDiff.Type != DiffDelete { + // Equality or insertion. + chars2 += len(aDiff.Text) + } + if chars1 > loc { + // Overshot the location. + lastDiff = aDiff + break + } + lastChars1 = chars1 + lastChars2 = chars2 + } + if lastDiff.Type == DiffDelete { + // The location was deleted. + return lastChars2 + } + // Add the remaining character length. + return lastChars2 + (loc - lastChars1) +} + +// DiffPrettyHtml converts a []Diff into a pretty HTML report. +// It is intended as an example from which to write one's own display functions. +func (dmp *DiffMatchPatch) DiffPrettyHtml(diffs []Diff) string { + var buff bytes.Buffer + for _, diff := range diffs { + text := strings.Replace(html.EscapeString(diff.Text), "\n", "&para;<br>
", -1) + switch diff.Type { + case DiffInsert: + _, _ = buff.WriteString("") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("") + case DiffDelete: + _, _ = buff.WriteString("") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("") + case DiffEqual: + _, _ = buff.WriteString("") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("") + } + } + return buff.String() +} + +// DiffPrettyText converts a []Diff into a colored text report. +func (dmp *DiffMatchPatch) DiffPrettyText(diffs []Diff) string { + var buff bytes.Buffer + for _, diff := range diffs { + text := diff.Text + + switch diff.Type { + case DiffInsert: + _, _ = buff.WriteString("\x1b[32m") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("\x1b[0m") + case DiffDelete: + _, _ = buff.WriteString("\x1b[31m") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("\x1b[0m") + case DiffEqual: + _, _ = buff.WriteString(text) + } + } + + return buff.String() +} + +// DiffText1 computes and returns the source text (all equalities and deletions). +func (dmp *DiffMatchPatch) DiffText1(diffs []Diff) string { + //StringBuilder text = new StringBuilder() + var text bytes.Buffer + + for _, aDiff := range diffs { + if aDiff.Type != DiffInsert { + _, _ = text.WriteString(aDiff.Text) + } + } + return text.String() +} + +// DiffText2 computes and returns the destination text (all equalities and insertions). +func (dmp *DiffMatchPatch) DiffText2(diffs []Diff) string { + var text bytes.Buffer + + for _, aDiff := range diffs { + if aDiff.Type != DiffDelete { + _, _ = text.WriteString(aDiff.Text) + } + } + return text.String() +} + +// DiffLevenshtein computes the Levenshtein distance that is the number of inserted, deleted or substituted characters. +func (dmp *DiffMatchPatch) DiffLevenshtein(diffs []Diff) int { + levenshtein := 0 + insertions := 0 + deletions := 0 + + for _, aDiff := range diffs { + switch aDiff.Type { + case DiffInsert: + insertions += utf8.RuneCountInString(aDiff.Text) + case DiffDelete: + deletions += utf8.RuneCountInString(aDiff.Text) + case DiffEqual: + // A deletion and an insertion is one substitution. + levenshtein += max(insertions, deletions) + insertions = 0 + deletions = 0 + } + } + + levenshtein += max(insertions, deletions) + return levenshtein +} + +// DiffToDelta crushes the diff into an encoded string which describes the operations required to transform text1 into text2. +// E.g. =3\t-2\t+ing -> Keep 3 chars, delete 2 chars, insert 'ing'. Operations are tab-separated. Inserted text is escaped using %xx notation. +func (dmp *DiffMatchPatch) DiffToDelta(diffs []Diff) string { + var text bytes.Buffer + for _, aDiff := range diffs { + switch aDiff.Type { + case DiffInsert: + _, _ = text.WriteString("+") + _, _ = text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1)) + _, _ = text.WriteString("\t") + break + case DiffDelete: + _, _ = text.WriteString("-") + _, _ = text.WriteString(strconv.Itoa(utf8.RuneCountInString(aDiff.Text))) + _, _ = text.WriteString("\t") + break + case DiffEqual: + _, _ = text.WriteString("=") + _, _ = text.WriteString(strconv.Itoa(utf8.RuneCountInString(aDiff.Text))) + _, _ = text.WriteString("\t") + break + } + } + delta := text.String() + if len(delta) != 0 { + // Strip off trailing tab character. 
+ delta = delta[0 : utf8.RuneCountInString(delta)-1] + delta = unescaper.Replace(delta) + } + return delta +} + +// DiffFromDelta given the original text1, and an encoded string which describes the operations required to transform text1 into text2, comAdde the full diff. +func (dmp *DiffMatchPatch) DiffFromDelta(text1 string, delta string) (diffs []Diff, err error) { + i := 0 + runes := []rune(text1) + + for _, token := range strings.Split(delta, "\t") { + if len(token) == 0 { + // Blank tokens are ok (from a trailing \t). + continue + } + + // Each token begins with a one character parameter which specifies the operation of this token (delete, insert, equality). + param := token[1:] + + switch op := token[0]; op { + case '+': + // Decode would Diff all "+" to " " + param = strings.Replace(param, "+", "%2b", -1) + param, err = url.QueryUnescape(param) + if err != nil { + return nil, err + } + if !utf8.ValidString(param) { + return nil, fmt.Errorf("invalid UTF-8 token: %q", param) + } + + diffs = append(diffs, Diff{DiffInsert, param}) + case '=', '-': + n, err := strconv.ParseInt(param, 10, 0) + if err != nil { + return nil, err + } else if n < 0 { + return nil, errors.New("Negative number in DiffFromDelta: " + param) + } + + i += int(n) + // Break out if we are out of bounds, go1.6 can't handle this very well + if i > len(runes) { + break + } + // Remember that string slicing is by byte - we want by rune here. + text := string(runes[i-int(n) : i]) + + if op == '=' { + diffs = append(diffs, Diff{DiffEqual, text}) + } else { + diffs = append(diffs, Diff{DiffDelete, text}) + } + default: + // Anything else is an error. + return nil, errors.New("Invalid diff operation in DiffFromDelta: " + string(token[0])) + } + } + + if i != len(runes) { + return nil, fmt.Errorf("Delta length (%v) is different from source text length (%v)", i, len(text1)) + } + + return diffs, nil +} + +// diffLinesToStrings splits two texts into a list of strings. Each string represents one line. +func (dmp *DiffMatchPatch) diffLinesToStrings(text1, text2 string) (string, string, []string) { + // '\x00' is a valid character, but various debuggers don't like it. So we'll insert a junk entry to avoid generating a null character. + lineArray := []string{""} // e.g. lineArray[4] == 'Hello\n' + + //Each string has the index of lineArray which it points to + strIndexArray1 := dmp.diffLinesToStringsMunge(text1, &lineArray) + strIndexArray2 := dmp.diffLinesToStringsMunge(text2, &lineArray) + + return intArrayToString(strIndexArray1), intArrayToString(strIndexArray2), lineArray +} + +// diffLinesToStringsMunge splits a text into an array of strings, and reduces the texts to a []string. +func (dmp *DiffMatchPatch) diffLinesToStringsMunge(text string, lineArray *[]string) []uint32 { + // Walk the text, pulling out a substring for each line. text.split('\n') would would temporarily double our memory footprint. Modifying text would create many large strings to garbage collect. + lineHash := map[string]int{} // e.g. 
lineHash['Hello\n'] == 4 + lineStart := 0 + lineEnd := -1 + strs := []uint32{} + + for lineEnd < len(text)-1 { + lineEnd = indexOf(text, "\n", lineStart) + + if lineEnd == -1 { + lineEnd = len(text) - 1 + } + + line := text[lineStart : lineEnd+1] + lineStart = lineEnd + 1 + lineValue, ok := lineHash[line] + + if ok { + strs = append(strs, uint32(lineValue)) + } else { + *lineArray = append(*lineArray, line) + lineHash[line] = len(*lineArray) - 1 + strs = append(strs, uint32(len(*lineArray)-1)) + } + } + + return strs +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go new file mode 100644 index 00000000000..d3acc32ce13 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go @@ -0,0 +1,46 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +// Package diffmatchpatch offers robust algorithms to perform the operations required for synchronizing plain text. +package diffmatchpatch + +import ( + "time" +) + +// DiffMatchPatch holds the configuration for diff-match-patch operations. +type DiffMatchPatch struct { + // Number of seconds to map a diff before giving up (0 for infinity). + DiffTimeout time.Duration + // Cost of an empty edit operation in terms of edit characters. + DiffEditCost int + // How far to search for a match (0 = exact location, 1000+ = broad match). A match this many characters away from the expected location will add 1.0 to the score (0.0 is a perfect match). + MatchDistance int + // When deleting a large block of text (over ~64 characters), how close do the contents have to be to match the expected contents. (0.0 = perfection, 1.0 = very loose). Note that MatchThreshold controls how closely the end points of a delete need to match. + PatchDeleteThreshold float64 + // Chunk size for context length. + PatchMargin int + // The number of bits in an int. + MatchMaxBits int + // At what point is no match declared (0.0 = perfection, 1.0 = very loose). + MatchThreshold float64 +} + +// New creates a new DiffMatchPatch object with default parameters. +func New() *DiffMatchPatch { + // Defaults. + return &DiffMatchPatch{ + DiffTimeout: time.Second, + DiffEditCost: 4, + MatchThreshold: 0.5, + MatchDistance: 1000, + PatchDeleteThreshold: 0.5, + PatchMargin: 4, + MatchMaxBits: 32, + } +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go new file mode 100644 index 00000000000..17374e109fe --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go @@ -0,0 +1,160 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "math" +) + +// MatchMain locates the best instance of 'pattern' in 'text' near 'loc'. +// Returns -1 if no match found. 
+func (dmp *DiffMatchPatch) MatchMain(text, pattern string, loc int) int { + // Check for null inputs not needed since null can't be passed in C#. + + loc = int(math.Max(0, math.Min(float64(loc), float64(len(text))))) + if text == pattern { + // Shortcut (potentially not guaranteed by the algorithm) + return 0 + } else if len(text) == 0 { + // Nothing to match. + return -1 + } else if loc+len(pattern) <= len(text) && text[loc:loc+len(pattern)] == pattern { + // Perfect match at the perfect spot! (Includes case of null pattern) + return loc + } + // Do a fuzzy compare. + return dmp.MatchBitap(text, pattern, loc) +} + +// MatchBitap locates the best instance of 'pattern' in 'text' near 'loc' using the Bitap algorithm. +// Returns -1 if no match was found. +func (dmp *DiffMatchPatch) MatchBitap(text, pattern string, loc int) int { + // Initialise the alphabet. + s := dmp.MatchAlphabet(pattern) + + // Highest score beyond which we give up. + scoreThreshold := dmp.MatchThreshold + // Is there a nearby exact match? (speedup) + bestLoc := indexOf(text, pattern, loc) + if bestLoc != -1 { + scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc, + pattern), scoreThreshold) + // What about in the other direction? (speedup) + bestLoc = lastIndexOf(text, pattern, loc+len(pattern)) + if bestLoc != -1 { + scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc, + pattern), scoreThreshold) + } + } + + // Initialise the bit arrays. + matchmask := 1 << uint((len(pattern) - 1)) + bestLoc = -1 + + var binMin, binMid int + binMax := len(pattern) + len(text) + lastRd := []int{} + for d := 0; d < len(pattern); d++ { + // Scan for the best match; each iteration allows for one more error. Run a binary search to determine how far from 'loc' we can stray at this error level. + binMin = 0 + binMid = binMax + for binMin < binMid { + if dmp.matchBitapScore(d, loc+binMid, loc, pattern) <= scoreThreshold { + binMin = binMid + } else { + binMax = binMid + } + binMid = (binMax-binMin)/2 + binMin + } + // Use the result from this iteration as the maximum for the next. + binMax = binMid + start := int(math.Max(1, float64(loc-binMid+1))) + finish := int(math.Min(float64(loc+binMid), float64(len(text))) + float64(len(pattern))) + + rd := make([]int, finish+2) + rd[finish+1] = (1 << uint(d)) - 1 + + for j := finish; j >= start; j-- { + var charMatch int + if len(text) <= j-1 { + // Out of range. + charMatch = 0 + } else if _, ok := s[text[j-1]]; !ok { + charMatch = 0 + } else { + charMatch = s[text[j-1]] + } + + if d == 0 { + // First pass: exact match. + rd[j] = ((rd[j+1] << 1) | 1) & charMatch + } else { + // Subsequent passes: fuzzy match. + rd[j] = ((rd[j+1]<<1)|1)&charMatch | (((lastRd[j+1] | lastRd[j]) << 1) | 1) | lastRd[j+1] + } + if (rd[j] & matchmask) != 0 { + score := dmp.matchBitapScore(d, j-1, loc, pattern) + // This match will almost certainly be better than any existing match. But check anyway. + if score <= scoreThreshold { + // Told you so. + scoreThreshold = score + bestLoc = j - 1 + if bestLoc > loc { + // When passing loc, don't exceed our current distance from loc. + start = int(math.Max(1, float64(2*loc-bestLoc))) + } else { + // Already passed loc, downhill from here on in. + break + } + } + } + } + if dmp.matchBitapScore(d+1, loc, loc, pattern) > scoreThreshold { + // No hope for a (better) match at greater error levels. + break + } + lastRd = rd + } + return bestLoc +} + +// matchBitapScore computes and returns the score for a match with e errors and x location. 
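A small illustration of the matching API above (the result is exact for this input; MatchThreshold, 0.5 by default, controls how fuzzy a match may be):

	// Sketch only: fuzzy-locate a pattern near an expected offset.
	dmp := diffmatchpatch.New()
	loc := dmp.MatchMain("abcdefghijk", "fgh", 5) // returns 5: exact match at the expected spot
	_ = loc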
+func (dmp *DiffMatchPatch) matchBitapScore(e, x, loc int, pattern string) float64 { + accuracy := float64(e) / float64(len(pattern)) + proximity := math.Abs(float64(loc - x)) + if dmp.MatchDistance == 0 { + // Dodge divide by zero error. + if proximity == 0 { + return accuracy + } + + return 1.0 + } + return accuracy + (proximity / float64(dmp.MatchDistance)) +} + +// MatchAlphabet initialises the alphabet for the Bitap algorithm. +func (dmp *DiffMatchPatch) MatchAlphabet(pattern string) map[byte]int { + s := map[byte]int{} + charPattern := []byte(pattern) + for _, c := range charPattern { + _, ok := s[c] + if !ok { + s[c] = 0 + } + } + i := 0 + + for _, c := range charPattern { + value := s[c] | int(uint(1)< y { + return x + } + return y +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/operation_string.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/operation_string.go new file mode 100644 index 00000000000..533ec0da7b3 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/operation_string.go @@ -0,0 +1,17 @@ +// Code generated by "stringer -type=Operation -trimprefix=Diff"; DO NOT EDIT. + +package diffmatchpatch + +import "fmt" + +const _Operation_name = "DeleteEqualInsert" + +var _Operation_index = [...]uint8{0, 6, 11, 17} + +func (i Operation) String() string { + i -= -1 + if i < 0 || i >= Operation(len(_Operation_index)-1) { + return fmt.Sprintf("Operation(%d)", i+-1) + } + return _Operation_name[_Operation_index[i]:_Operation_index[i+1]] +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go new file mode 100644 index 00000000000..0dbe3bdd7de --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go @@ -0,0 +1,556 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "bytes" + "errors" + "math" + "net/url" + "regexp" + "strconv" + "strings" +) + +// Patch represents one patch operation. +type Patch struct { + diffs []Diff + Start1 int + Start2 int + Length1 int + Length2 int +} + +// String emulates GNU diff's format. +// Header: @@ -382,8 +481,9 @@ +// Indices are printed as 1-based, not 0-based. +func (p *Patch) String() string { + var coords1, coords2 string + + if p.Length1 == 0 { + coords1 = strconv.Itoa(p.Start1) + ",0" + } else if p.Length1 == 1 { + coords1 = strconv.Itoa(p.Start1 + 1) + } else { + coords1 = strconv.Itoa(p.Start1+1) + "," + strconv.Itoa(p.Length1) + } + + if p.Length2 == 0 { + coords2 = strconv.Itoa(p.Start2) + ",0" + } else if p.Length2 == 1 { + coords2 = strconv.Itoa(p.Start2 + 1) + } else { + coords2 = strconv.Itoa(p.Start2+1) + "," + strconv.Itoa(p.Length2) + } + + var text bytes.Buffer + _, _ = text.WriteString("@@ -" + coords1 + " +" + coords2 + " @@\n") + + // Escape the body of the patch with %xx notation. 
+ for _, aDiff := range p.diffs { + switch aDiff.Type { + case DiffInsert: + _, _ = text.WriteString("+") + case DiffDelete: + _, _ = text.WriteString("-") + case DiffEqual: + _, _ = text.WriteString(" ") + } + + _, _ = text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1)) + _, _ = text.WriteString("\n") + } + + return unescaper.Replace(text.String()) +} + +// PatchAddContext increases the context until it is unique, but doesn't let the pattern expand beyond MatchMaxBits. +func (dmp *DiffMatchPatch) PatchAddContext(patch Patch, text string) Patch { + if len(text) == 0 { + return patch + } + + pattern := text[patch.Start2 : patch.Start2+patch.Length1] + padding := 0 + + // Look for the first and last matches of pattern in text. If two different matches are found, increase the pattern length. + for strings.Index(text, pattern) != strings.LastIndex(text, pattern) && + len(pattern) < dmp.MatchMaxBits-2*dmp.PatchMargin { + padding += dmp.PatchMargin + maxStart := max(0, patch.Start2-padding) + minEnd := min(len(text), patch.Start2+patch.Length1+padding) + pattern = text[maxStart:minEnd] + } + // Add one chunk for good luck. + padding += dmp.PatchMargin + + // Add the prefix. + prefix := text[max(0, patch.Start2-padding):patch.Start2] + if len(prefix) != 0 { + patch.diffs = append([]Diff{Diff{DiffEqual, prefix}}, patch.diffs...) + } + // Add the suffix. + suffix := text[patch.Start2+patch.Length1 : min(len(text), patch.Start2+patch.Length1+padding)] + if len(suffix) != 0 { + patch.diffs = append(patch.diffs, Diff{DiffEqual, suffix}) + } + + // Roll back the start points. + patch.Start1 -= len(prefix) + patch.Start2 -= len(prefix) + // Extend the lengths. + patch.Length1 += len(prefix) + len(suffix) + patch.Length2 += len(prefix) + len(suffix) + + return patch +} + +// PatchMake computes a list of patches. +func (dmp *DiffMatchPatch) PatchMake(opt ...interface{}) []Patch { + if len(opt) == 1 { + diffs, _ := opt[0].([]Diff) + text1 := dmp.DiffText1(diffs) + return dmp.PatchMake(text1, diffs) + } else if len(opt) == 2 { + text1 := opt[0].(string) + switch t := opt[1].(type) { + case string: + diffs := dmp.DiffMain(text1, t, true) + if len(diffs) > 2 { + diffs = dmp.DiffCleanupSemantic(diffs) + diffs = dmp.DiffCleanupEfficiency(diffs) + } + return dmp.PatchMake(text1, diffs) + case []Diff: + return dmp.patchMake2(text1, t) + } + } else if len(opt) == 3 { + return dmp.PatchMake(opt[0], opt[2]) + } + return []Patch{} +} + +// patchMake2 computes a list of patches to turn text1 into text2. +// text2 is not provided, diffs are the delta between text1 and text2. +func (dmp *DiffMatchPatch) patchMake2(text1 string, diffs []Diff) []Patch { + // Check for null inputs not needed since null can't be passed in C#. + patches := []Patch{} + if len(diffs) == 0 { + return patches // Get rid of the null case. + } + + patch := Patch{} + charCount1 := 0 // Number of characters into the text1 string. + charCount2 := 0 // Number of characters into the text2 string. + // Start with text1 (prepatchText) and apply the diffs until we arrive at text2 (postpatchText). We recreate the patches one by one to determine context info. + prepatchText := text1 + postpatchText := text1 + + for i, aDiff := range diffs { + if len(patch.diffs) == 0 && aDiff.Type != DiffEqual { + // A new patch starts here. 
+ patch.Start1 = charCount1 + patch.Start2 = charCount2 + } + + switch aDiff.Type { + case DiffInsert: + patch.diffs = append(patch.diffs, aDiff) + patch.Length2 += len(aDiff.Text) + postpatchText = postpatchText[:charCount2] + + aDiff.Text + postpatchText[charCount2:] + case DiffDelete: + patch.Length1 += len(aDiff.Text) + patch.diffs = append(patch.diffs, aDiff) + postpatchText = postpatchText[:charCount2] + postpatchText[charCount2+len(aDiff.Text):] + case DiffEqual: + if len(aDiff.Text) <= 2*dmp.PatchMargin && + len(patch.diffs) != 0 && i != len(diffs)-1 { + // Small equality inside a patch. + patch.diffs = append(patch.diffs, aDiff) + patch.Length1 += len(aDiff.Text) + patch.Length2 += len(aDiff.Text) + } + if len(aDiff.Text) >= 2*dmp.PatchMargin { + // Time for a new patch. + if len(patch.diffs) != 0 { + patch = dmp.PatchAddContext(patch, prepatchText) + patches = append(patches, patch) + patch = Patch{} + // Unlike Unidiff, our patch lists have a rolling context. http://code.google.com/p/google-diff-match-patch/wiki/Unidiff Update prepatch text & pos to reflect the application of the just completed patch. + prepatchText = postpatchText + charCount1 = charCount2 + } + } + } + + // Update the current character count. + if aDiff.Type != DiffInsert { + charCount1 += len(aDiff.Text) + } + if aDiff.Type != DiffDelete { + charCount2 += len(aDiff.Text) + } + } + + // Pick up the leftover patch if not empty. + if len(patch.diffs) != 0 { + patch = dmp.PatchAddContext(patch, prepatchText) + patches = append(patches, patch) + } + + return patches +} + +// PatchDeepCopy returns an array that is identical to a given an array of patches. +func (dmp *DiffMatchPatch) PatchDeepCopy(patches []Patch) []Patch { + patchesCopy := []Patch{} + for _, aPatch := range patches { + patchCopy := Patch{} + for _, aDiff := range aPatch.diffs { + patchCopy.diffs = append(patchCopy.diffs, Diff{ + aDiff.Type, + aDiff.Text, + }) + } + patchCopy.Start1 = aPatch.Start1 + patchCopy.Start2 = aPatch.Start2 + patchCopy.Length1 = aPatch.Length1 + patchCopy.Length2 = aPatch.Length2 + patchesCopy = append(patchesCopy, patchCopy) + } + return patchesCopy +} + +// PatchApply merges a set of patches onto the text. Returns a patched text, as well as an array of true/false values indicating which patches were applied. +func (dmp *DiffMatchPatch) PatchApply(patches []Patch, text string) (string, []bool) { + if len(patches) == 0 { + return text, []bool{} + } + + // Deep copy the patches so that no changes are made to originals. + patches = dmp.PatchDeepCopy(patches) + + nullPadding := dmp.PatchAddPadding(patches) + text = nullPadding + text + nullPadding + patches = dmp.PatchSplitMax(patches) + + x := 0 + // delta keeps track of the offset between the expected and actual location of the previous patch. If there are patches expected at positions 10 and 20, but the first patch was found at 12, delta is 2 and the second patch has an effective expected position of 22. + delta := 0 + results := make([]bool, len(patches)) + for _, aPatch := range patches { + expectedLoc := aPatch.Start2 + delta + text1 := dmp.DiffText1(aPatch.diffs) + var startLoc int + endLoc := -1 + if len(text1) > dmp.MatchMaxBits { + // PatchSplitMax will only provide an oversized pattern in the case of a monster delete. 
+ startLoc = dmp.MatchMain(text, text1[:dmp.MatchMaxBits], expectedLoc) + if startLoc != -1 { + endLoc = dmp.MatchMain(text, + text1[len(text1)-dmp.MatchMaxBits:], expectedLoc+len(text1)-dmp.MatchMaxBits) + if endLoc == -1 || startLoc >= endLoc { + // Can't find valid trailing context. Drop this patch. + startLoc = -1 + } + } + } else { + startLoc = dmp.MatchMain(text, text1, expectedLoc) + } + if startLoc == -1 { + // No match found. :( + results[x] = false + // Subtract the delta for this failed patch from subsequent patches. + delta -= aPatch.Length2 - aPatch.Length1 + } else { + // Found a match. :) + results[x] = true + delta = startLoc - expectedLoc + var text2 string + if endLoc == -1 { + text2 = text[startLoc:int(math.Min(float64(startLoc+len(text1)), float64(len(text))))] + } else { + text2 = text[startLoc:int(math.Min(float64(endLoc+dmp.MatchMaxBits), float64(len(text))))] + } + if text1 == text2 { + // Perfect match, just shove the Replacement text in. + text = text[:startLoc] + dmp.DiffText2(aPatch.diffs) + text[startLoc+len(text1):] + } else { + // Imperfect match. Run a diff to get a framework of equivalent indices. + diffs := dmp.DiffMain(text1, text2, false) + if len(text1) > dmp.MatchMaxBits && float64(dmp.DiffLevenshtein(diffs))/float64(len(text1)) > dmp.PatchDeleteThreshold { + // The end points match, but the content is unacceptably bad. + results[x] = false + } else { + diffs = dmp.DiffCleanupSemanticLossless(diffs) + index1 := 0 + for _, aDiff := range aPatch.diffs { + if aDiff.Type != DiffEqual { + index2 := dmp.DiffXIndex(diffs, index1) + if aDiff.Type == DiffInsert { + // Insertion + text = text[:startLoc+index2] + aDiff.Text + text[startLoc+index2:] + } else if aDiff.Type == DiffDelete { + // Deletion + startIndex := startLoc + index2 + text = text[:startIndex] + + text[startIndex+dmp.DiffXIndex(diffs, index1+len(aDiff.Text))-index2:] + } + } + if aDiff.Type != DiffDelete { + index1 += len(aDiff.Text) + } + } + } + } + } + x++ + } + // Strip the padding off. + text = text[len(nullPadding) : len(nullPadding)+(len(text)-2*len(nullPadding))] + return text, results +} + +// PatchAddPadding adds some padding on text start and end so that edges can match something. +// Intended to be called only from within patchApply. +func (dmp *DiffMatchPatch) PatchAddPadding(patches []Patch) string { + paddingLength := dmp.PatchMargin + nullPadding := "" + for x := 1; x <= paddingLength; x++ { + nullPadding += string(rune(x)) + } + + // Bump all the patches forward. + for i := range patches { + patches[i].Start1 += paddingLength + patches[i].Start2 += paddingLength + } + + // Add some padding on start of first diff. + if len(patches[0].diffs) == 0 || patches[0].diffs[0].Type != DiffEqual { + // Add nullPadding equality. + patches[0].diffs = append([]Diff{Diff{DiffEqual, nullPadding}}, patches[0].diffs...) + patches[0].Start1 -= paddingLength // Should be 0. + patches[0].Start2 -= paddingLength // Should be 0. + patches[0].Length1 += paddingLength + patches[0].Length2 += paddingLength + } else if paddingLength > len(patches[0].diffs[0].Text) { + // Grow first equality. + extraLength := paddingLength - len(patches[0].diffs[0].Text) + patches[0].diffs[0].Text = nullPadding[len(patches[0].diffs[0].Text):] + patches[0].diffs[0].Text + patches[0].Start1 -= extraLength + patches[0].Start2 -= extraLength + patches[0].Length1 += extraLength + patches[0].Length2 += extraLength + } + + // Add some padding on end of last diff. 
+ last := len(patches) - 1 + if len(patches[last].diffs) == 0 || patches[last].diffs[len(patches[last].diffs)-1].Type != DiffEqual { + // Add nullPadding equality. + patches[last].diffs = append(patches[last].diffs, Diff{DiffEqual, nullPadding}) + patches[last].Length1 += paddingLength + patches[last].Length2 += paddingLength + } else if paddingLength > len(patches[last].diffs[len(patches[last].diffs)-1].Text) { + // Grow last equality. + lastDiff := patches[last].diffs[len(patches[last].diffs)-1] + extraLength := paddingLength - len(lastDiff.Text) + patches[last].diffs[len(patches[last].diffs)-1].Text += nullPadding[:extraLength] + patches[last].Length1 += extraLength + patches[last].Length2 += extraLength + } + + return nullPadding +} + +// PatchSplitMax looks through the patches and breaks up any which are longer than the maximum limit of the match algorithm. +// Intended to be called only from within patchApply. +func (dmp *DiffMatchPatch) PatchSplitMax(patches []Patch) []Patch { + patchSize := dmp.MatchMaxBits + for x := 0; x < len(patches); x++ { + if patches[x].Length1 <= patchSize { + continue + } + bigpatch := patches[x] + // Remove the big old patch. + patches = append(patches[:x], patches[x+1:]...) + x-- + + Start1 := bigpatch.Start1 + Start2 := bigpatch.Start2 + precontext := "" + for len(bigpatch.diffs) != 0 { + // Create one of several smaller patches. + patch := Patch{} + empty := true + patch.Start1 = Start1 - len(precontext) + patch.Start2 = Start2 - len(precontext) + if len(precontext) != 0 { + patch.Length1 = len(precontext) + patch.Length2 = len(precontext) + patch.diffs = append(patch.diffs, Diff{DiffEqual, precontext}) + } + for len(bigpatch.diffs) != 0 && patch.Length1 < patchSize-dmp.PatchMargin { + diffType := bigpatch.diffs[0].Type + diffText := bigpatch.diffs[0].Text + if diffType == DiffInsert { + // Insertions are harmless. + patch.Length2 += len(diffText) + Start2 += len(diffText) + patch.diffs = append(patch.diffs, bigpatch.diffs[0]) + bigpatch.diffs = bigpatch.diffs[1:] + empty = false + } else if diffType == DiffDelete && len(patch.diffs) == 1 && patch.diffs[0].Type == DiffEqual && len(diffText) > 2*patchSize { + // This is a large deletion. Let it pass in one chunk. + patch.Length1 += len(diffText) + Start1 += len(diffText) + empty = false + patch.diffs = append(patch.diffs, Diff{diffType, diffText}) + bigpatch.diffs = bigpatch.diffs[1:] + } else { + // Deletion or equality. Only take as much as we can stomach. + diffText = diffText[:min(len(diffText), patchSize-patch.Length1-dmp.PatchMargin)] + + patch.Length1 += len(diffText) + Start1 += len(diffText) + if diffType == DiffEqual { + patch.Length2 += len(diffText) + Start2 += len(diffText) + } else { + empty = false + } + patch.diffs = append(patch.diffs, Diff{diffType, diffText}) + if diffText == bigpatch.diffs[0].Text { + bigpatch.diffs = bigpatch.diffs[1:] + } else { + bigpatch.diffs[0].Text = + bigpatch.diffs[0].Text[len(diffText):] + } + } + } + // Compute the head context for the next patch. + precontext = dmp.DiffText2(patch.diffs) + precontext = precontext[max(0, len(precontext)-dmp.PatchMargin):] + + postcontext := "" + // Append the end context for this patch. 
+ if len(dmp.DiffText1(bigpatch.diffs)) > dmp.PatchMargin { + postcontext = dmp.DiffText1(bigpatch.diffs)[:dmp.PatchMargin] + } else { + postcontext = dmp.DiffText1(bigpatch.diffs) + } + + if len(postcontext) != 0 { + patch.Length1 += len(postcontext) + patch.Length2 += len(postcontext) + if len(patch.diffs) != 0 && patch.diffs[len(patch.diffs)-1].Type == DiffEqual { + patch.diffs[len(patch.diffs)-1].Text += postcontext + } else { + patch.diffs = append(patch.diffs, Diff{DiffEqual, postcontext}) + } + } + if !empty { + x++ + patches = append(patches[:x], append([]Patch{patch}, patches[x:]...)...) + } + } + } + return patches +} + +// PatchToText takes a list of patches and returns a textual representation. +func (dmp *DiffMatchPatch) PatchToText(patches []Patch) string { + var text bytes.Buffer + for _, aPatch := range patches { + _, _ = text.WriteString(aPatch.String()) + } + return text.String() +} + +// PatchFromText parses a textual representation of patches and returns a List of Patch objects. +func (dmp *DiffMatchPatch) PatchFromText(textline string) ([]Patch, error) { + patches := []Patch{} + if len(textline) == 0 { + return patches, nil + } + text := strings.Split(textline, "\n") + textPointer := 0 + patchHeader := regexp.MustCompile("^@@ -(\\d+),?(\\d*) \\+(\\d+),?(\\d*) @@$") + + var patch Patch + var sign uint8 + var line string + for textPointer < len(text) { + + if !patchHeader.MatchString(text[textPointer]) { + return patches, errors.New("Invalid patch string: " + text[textPointer]) + } + + patch = Patch{} + m := patchHeader.FindStringSubmatch(text[textPointer]) + + patch.Start1, _ = strconv.Atoi(m[1]) + if len(m[2]) == 0 { + patch.Start1-- + patch.Length1 = 1 + } else if m[2] == "0" { + patch.Length1 = 0 + } else { + patch.Start1-- + patch.Length1, _ = strconv.Atoi(m[2]) + } + + patch.Start2, _ = strconv.Atoi(m[3]) + + if len(m[4]) == 0 { + patch.Start2-- + patch.Length2 = 1 + } else if m[4] == "0" { + patch.Length2 = 0 + } else { + patch.Start2-- + patch.Length2, _ = strconv.Atoi(m[4]) + } + textPointer++ + + for textPointer < len(text) { + if len(text[textPointer]) > 0 { + sign = text[textPointer][0] + } else { + textPointer++ + continue + } + + line = text[textPointer][1:] + line = strings.Replace(line, "+", "%2b", -1) + line, _ = url.QueryUnescape(line) + if sign == '-' { + // Deletion. + patch.diffs = append(patch.diffs, Diff{DiffDelete, line}) + } else if sign == '+' { + // Insertion. + patch.diffs = append(patch.diffs, Diff{DiffInsert, line}) + } else if sign == ' ' { + // Minor equality. + patch.diffs = append(patch.diffs, Diff{DiffEqual, line}) + } else if sign == '@' { + // Start of next patch. + break + } else { + // WTF? + return patches, errors.New("Invalid patch mode '" + string(sign) + "' in: " + string(line)) + } + textPointer++ + } + + patches = append(patches, patch) + } + return patches, nil +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go new file mode 100644 index 00000000000..44c43595478 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go @@ -0,0 +1,106 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. 
+// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "strconv" + "strings" + "unicode/utf8" +) + +// unescaper unescapes selected chars for compatibility with JavaScript's encodeURI. +// In speed critical applications this could be dropped since the receiving application will certainly decode these fine. Note that this function is case-sensitive. Thus "%3F" would not be unescaped. But this is ok because it is only called with the output of HttpUtility.UrlEncode which returns lowercase hex. Example: "%3f" -> "?", "%24" -> "$", etc. +var unescaper = strings.NewReplacer( + "%21", "!", "%7E", "~", "%27", "'", + "%28", "(", "%29", ")", "%3B", ";", + "%2F", "/", "%3F", "?", "%3A", ":", + "%40", "@", "%26", "&", "%3D", "=", + "%2B", "+", "%24", "$", "%2C", ",", "%23", "#", "%2A", "*") + +// indexOf returns the first index of pattern in str, starting at str[i]. +func indexOf(str string, pattern string, i int) int { + if i > len(str)-1 { + return -1 + } + if i <= 0 { + return strings.Index(str, pattern) + } + ind := strings.Index(str[i:], pattern) + if ind == -1 { + return -1 + } + return ind + i +} + +// lastIndexOf returns the last index of pattern in str, starting at str[i]. +func lastIndexOf(str string, pattern string, i int) int { + if i < 0 { + return -1 + } + if i >= len(str) { + return strings.LastIndex(str, pattern) + } + _, size := utf8.DecodeRuneInString(str[i:]) + return strings.LastIndex(str[:i+size], pattern) +} + +// runesIndexOf returns the index of pattern in target, starting at target[i]. +func runesIndexOf(target, pattern []rune, i int) int { + if i > len(target)-1 { + return -1 + } + if i <= 0 { + return runesIndex(target, pattern) + } + ind := runesIndex(target[i:], pattern) + if ind == -1 { + return -1 + } + return ind + i +} + +func runesEqual(r1, r2 []rune) bool { + if len(r1) != len(r2) { + return false + } + for i, c := range r1 { + if c != r2[i] { + return false + } + } + return true +} + +// runesIndex is the equivalent of strings.Index for rune slices. +func runesIndex(r1, r2 []rune) int { + last := len(r1) - len(r2) + for i := 0; i <= last; i++ { + if runesEqual(r1[i:i+len(r2)], r2) { + return i + } + } + return -1 +} + +func intArrayToString(ns []uint32) string { + if len(ns) == 0 { + return "" + } + + indexSeparator := IndexSeparator[0] + + // Appr. 3 chars per num plus the comma. + b := []byte{} + for _, n := range ns { + b = strconv.AppendInt(b, int64(n), 10) + b = append(b, indexSeparator) + } + b = b[:len(b)-1] + return string(b) +} diff --git a/vendor/github.com/tidwall/gjson/README.md b/vendor/github.com/tidwall/gjson/README.md index 96b2e4dc330..7701cae4366 100644 --- a/vendor/github.com/tidwall/gjson/README.md +++ b/vendor/github.com/tidwall/gjson/README.md @@ -1,7 +1,9 @@

[GJSON logo image markup updated in the README header]
GoDoc GJSON Playground diff --git a/vendor/github.com/tidwall/gjson/SYNTAX.md b/vendor/github.com/tidwall/gjson/SYNTAX.md index 6721d7f5143..a3f0fac2380 100644 --- a/vendor/github.com/tidwall/gjson/SYNTAX.md +++ b/vendor/github.com/tidwall/gjson/SYNTAX.md @@ -1,6 +1,6 @@ # GJSON Path Syntax -A GJSON Path is a text string syntax that describes a search pattern for quickly retreiving values from a JSON payload. +A GJSON Path is a text string syntax that describes a search pattern for quickly retrieving values from a JSON payload. This document is designed to explain the structure of a GJSON Path through examples. @@ -15,12 +15,12 @@ This document is designed to explain the structure of a GJSON Path through examp - [Multipaths](#multipaths) - [Literals](#literals) -The definitive implemenation is [github.com/tidwall/gjson](https://github.com/tidwall/gjson). +The definitive implementation is [github.com/tidwall/gjson](https://github.com/tidwall/gjson). Use the [GJSON Playground](https://gjson.dev) to experiment with the syntax online. ## Path structure -A GJSON Path is intended to be easily expressed as a series of components seperated by a `.` character. +A GJSON Path is intended to be easily expressed as a series of components separated by a `.` character. Along with `.` character, there are a few more that have special meaning, including `|`, `#`, `@`, `\`, `*`, `!`, and `?`. @@ -46,7 +46,7 @@ The following GJSON Paths evaluate to the accompanying values. ### Basic -In many cases you'll just want to retreive values by object name or array index. +In many cases you'll just want to retrieve values by object name or array index. ```go name.last "Anderson" diff --git a/vendor/github.com/tidwall/gjson/gjson.go b/vendor/github.com/tidwall/gjson/gjson.go index 4acd087c05c..779fe617fe2 100644 --- a/vendor/github.com/tidwall/gjson/gjson.go +++ b/vendor/github.com/tidwall/gjson/gjson.go @@ -1252,7 +1252,7 @@ func parseObject(c *parseContext, i int, path string) (int, bool) { } // matchLimit will limit the complexity of the match operation to avoid ReDos -// attacks from arbritary inputs. +// attacks from arbitrary inputs. // See the github.com/tidwall/match.MatchLimit function for more information. func matchLimit(str, pattern string) bool { matched, _ := match.MatchLimit(str, pattern, 10000) @@ -1917,6 +1917,16 @@ func appendHex16(dst []byte, x uint16) []byte { ) } +// DisableEscapeHTML will disable the automatic escaping of certain +// "problamatic" HTML characters when encoding to JSON. +// These character include '>', '<' and '&', which get escaped to \u003e, +// \u0026, and \u003c respectively. +// +// This is a global flag and will affect all further gjson operations. +// Ideally, if used, it should be set one time before other gjson functions +// are called. +var DisableEscapeHTML = false + // AppendJSONString is a convenience function that converts the provided string // to a valid JSON string and appends it to dst. func AppendJSONString(dst []byte, s string) []byte { @@ -1940,7 +1950,8 @@ func AppendJSONString(dst []byte, s string) []byte { dst = append(dst, 'u') dst = appendHex16(dst, uint16(s[i])) } - } else if s[i] == '>' || s[i] == '<' || s[i] == '&' { + } else if !DisableEscapeHTML && + (s[i] == '>' || s[i] == '<' || s[i] == '&') { dst = append(dst, '\\', 'u') dst = appendHex16(dst, uint16(s[i])) } else if s[i] == '\\' { @@ -2194,7 +2205,7 @@ func unescape(json string) string { } // Less return true if a token is less than another token. 
-// The caseSensitive paramater is used when the tokens are Strings.
+// The caseSensitive parameter is used when the tokens are Strings.
 // The order when comparing two different type is:
 //
 //	Null < False < Number < String < True < JSON
@@ -3353,7 +3364,7 @@ func (t Result) Path(json string) string {
 		goto fail
 	}
 	if !strings.HasPrefix(json[t.Index:], t.Raw) {
-		// Result is not at the JSON index as exepcted.
+		// Result is not at the JSON index as expected.
 		goto fail
 	}
 	for ; i >= 0; i-- {
diff --git a/vendor/github.com/tidwall/gjson/logo.png b/vendor/github.com/tidwall/gjson/logo.png
deleted file mode 100644
index 17a8bbe9d651e5d79cbd59a3645a152443440ad6..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 15936
[binary PNG data (15936 bytes) omitted]
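The gjson.go hunk above introduces a package-level DisableEscapeHTML switch alongside the typo fixes. As a rough illustration only (not part of the vendored diff, and assuming the gjson version vendored here), the flag changes what AppendJSONString emits for '<', '>' and '&':

package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

func main() {
	// By default gjson escapes '<', '>' and '&' when it encodes a JSON string.
	fmt.Println(string(gjson.AppendJSONString(nil, "<b>5 & 6</b>")))
	// Output: "\u003cb\u003e5 \u0026 6\u003c/b\u003e"

	// DisableEscapeHTML is a global flag; per its doc comment it should be set
	// once, before other gjson calls. With it set, the characters pass through.
	gjson.DisableEscapeHTML = true
	fmt.Println(string(gjson.AppendJSONString(nil, "<b>5 & 6</b>")))
	// Output: "<b>5 & 6</b>"
}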
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go
+	if info.AllowedCN != "" && len(info.AllowedCNs) > 0 {
+		return nil, fmt.Errorf("AllowedCN and AllowedCNs are mutually exclusive (cn=%q, cns=%q)", info.AllowedCN, info.AllowedCNs)
+	}
+	if info.AllowedHostname != "" && len(info.AllowedHostnames) > 0 {
+		return nil, fmt.Errorf("AllowedHostname and AllowedHostnames are mutually exclusive (hostname=%q, hostnames=%q)", info.AllowedHostname, info.AllowedHostnames)
+	}
+	if info.AllowedCN != "" && info.AllowedHostname != "" {
+		return nil, fmt.Errorf("AllowedCN and AllowedHostname are mutually exclusive (cn=%q, hostname=%q)", info.AllowedCN, info.AllowedHostname)
+	}
+	if len(info.AllowedCNs) > 0 && len(info.AllowedHostnames) > 0 {
+		return nil, fmt.Errorf("AllowedCNs and AllowedHostnames are mutually exclusive (cns=%q, hostnames=%q)", info.AllowedCNs, info.AllowedHostnames)
+	}
+
 	if info.AllowedCN != "" {
-		if info.AllowedHostname != "" {
-			return nil, fmt.Errorf("AllowedCN and AllowedHostname are mutually exclusive (cn=%q, hostname=%q)", info.AllowedCN, info.AllowedHostname)
-		}
+		info.Logger.Warn("AllowedCN is deprecated, use AllowedCNs instead")
 		verifyCertificate = func(cert *x509.Certificate) bool {
 			return info.AllowedCN == cert.Subject.CommonName
 		}
 	}
 	if info.AllowedHostname != "" {
+		info.Logger.Warn("AllowedHostname is deprecated, use AllowedHostnames instead")
 		verifyCertificate = func(cert *x509.Certificate) bool {
 			return cert.VerifyHostname(info.AllowedHostname) == nil
 		}
 	}
+	if len(info.AllowedCNs) > 0 {
+		verifyCertificate = func(cert *x509.Certificate) bool {
+			for _, allowedCN := range info.AllowedCNs {
+				if allowedCN == cert.Subject.CommonName {
+					return true
+				}
+			}
+			return false
+		}
+	}
+	if len(info.AllowedHostnames) > 0 {
+		verifyCertificate = func(cert *x509.Certificate) bool {
+			for _,
allowedHostname := range info.AllowedHostnames { + if cert.VerifyHostname(allowedHostname) == nil { + return true + } + } + return false + } + } if verifyCertificate != nil { cfg.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { for _, chains := range verifiedChains { diff --git a/vendor/go.etcd.io/etcd/client/v3/client.go b/vendor/go.etcd.io/etcd/client/v3/client.go index 8a2225b2277..312d03e7a62 100644 --- a/vendor/go.etcd.io/etcd/client/v3/client.go +++ b/vendor/go.etcd.io/etcd/client/v3/client.go @@ -86,7 +86,7 @@ func New(cfg Config) (*Client, error) { // service interface implementations and do not need connection management. func NewCtxClient(ctx context.Context, opts ...Option) *Client { cctx, cancel := context.WithCancel(ctx) - c := &Client{ctx: cctx, cancel: cancel, lgMu: new(sync.RWMutex)} + c := &Client{ctx: cctx, cancel: cancel, lgMu: new(sync.RWMutex), mu: new(sync.RWMutex)} for _, opt := range opts { opt(c) } diff --git a/vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go b/vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go index 7dc5ddae0fd..8c50dcfa93d 100644 --- a/vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go +++ b/vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go @@ -19,6 +19,7 @@ package clientv3 import ( "context" + "errors" "io" "sync" "time" @@ -85,7 +86,7 @@ func (c *Client) unaryClientInterceptor(optFuncs ...retryOption) grpc.UnaryClien } continue } - if !isSafeRetry(c.lg, lastErr, callOpts) { + if !isSafeRetry(c, lastErr, callOpts) { return lastErr } } @@ -279,7 +280,7 @@ func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{} return true, err } - return isSafeRetry(s.client.lg, err, s.callOpts), err + return isSafeRetry(s.client, err, s.callOpts), err } func (s *serverStreamingRetryingStream) reestablishStreamAndResendBuffer(callCtx context.Context) (grpc.ClientStream, error) { @@ -319,17 +320,28 @@ func waitRetryBackoff(ctx context.Context, attempt uint, callOpts *options) erro } // isSafeRetry returns "true", if request is safe for retry with the given error. -func isSafeRetry(lg *zap.Logger, err error, callOpts *options) bool { +func isSafeRetry(c *Client, err error, callOpts *options) bool { if isContextError(err) { return false } + + // Situation when learner refuses RPC it is supposed to not serve is from the server + // perspective not retryable. + // But for backward-compatibility reasons we need to support situation that + // customer provides mix of learners (not yet voters) and voters with an + // expectation to pick voter in the next attempt. + // TODO: Ideally client should be 'aware' which endpoint represents: leader/voter/learner with high probability. 
+ if errors.Is(err, rpctypes.ErrGPRCNotSupportedForLearner) && len(c.Endpoints()) > 1 { + return true + } + switch callOpts.retryPolicy { case repeatable: return isSafeRetryImmutableRPC(err) case nonRepeatable: return isSafeRetryMutableRPC(err) default: - lg.Warn("unrecognized retry policy", zap.String("retryPolicy", callOpts.retryPolicy.String())) + c.lg.Warn("unrecognized retry policy", zap.String("retryPolicy", callOpts.retryPolicy.String())) return false } } diff --git a/vendor/go.etcd.io/etcd/client/v3/watch.go b/vendor/go.etcd.io/etcd/client/v3/watch.go index 41a6ec97633..963f7e65c39 100644 --- a/vendor/go.etcd.io/etcd/client/v3/watch.go +++ b/vendor/go.etcd.io/etcd/client/v3/watch.go @@ -1036,7 +1036,7 @@ func (pr *progressRequest) toPB() *pb.WatchRequest { func streamKeyFromCtx(ctx context.Context) string { if md, ok := metadata.FromOutgoingContext(ctx); ok { - return fmt.Sprintf("%+v", md) + return fmt.Sprintf("%+v", map[string][]string(md)) } return "" } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go index 7e08aab35e7..fc4a7b1dbf5 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go @@ -330,7 +330,7 @@ func (DefaultValueDecoders) intDecodeType(dc DecodeContext, vr bsonrw.ValueReade case reflect.Int64: return reflect.ValueOf(i64), nil case reflect.Int: - if int64(int(i64)) != i64 { // Can we fit this inside of an int + if i64 > math.MaxInt { // Can we fit this inside of an int return emptyValue, fmt.Errorf("%d overflows int", i64) } @@ -434,7 +434,7 @@ func (dvd DefaultValueDecoders) UintDecodeValue(dc DecodeContext, vr bsonrw.Valu return fmt.Errorf("%d overflows uint64", i64) } case reflect.Uint: - if i64 < 0 || int64(uint(i64)) != i64 { // Can we fit this inside of an uint + if i64 < 0 || uint64(i64) > uint64(math.MaxUint) { // Can we fit this inside of an uint return fmt.Errorf("%d overflows uint", i64) } default: diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go index 85254727695..39b07135b18 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go @@ -164,11 +164,15 @@ func (uic *UIntCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t refl return reflect.ValueOf(uint64(i64)), nil case reflect.Uint: - if i64 < 0 || int64(uint(i64)) != i64 { // Can we fit this inside of an uint + if i64 < 0 { + return emptyValue, fmt.Errorf("%d overflows uint", i64) + } + v := uint64(i64) + if v > math.MaxUint { // Can we fit this inside of an uint return emptyValue, fmt.Errorf("%d overflows uint", i64) } - return reflect.ValueOf(uint(i64)), nil + return reflect.ValueOf(uint(v)), nil default: return emptyValue, ValueDecoderError{ Name: "UintDecodeValue", diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go index 9695704246d..af6ae7b76bf 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go @@ -95,9 +95,9 @@ func (ejv *extJSONValue) parseBinary() (b []byte, subType byte, err error) { return nil, 0, fmt.Errorf("$binary subType value should be string, but instead is %s", val.t) } - i, 
err := strconv.ParseInt(val.v.(string), 16, 64) + i, err := strconv.ParseUint(val.v.(string), 16, 8) if err != nil { - return nil, 0, fmt.Errorf("invalid $binary subType string: %s", val.v.(string)) + return nil, 0, fmt.Errorf("invalid $binary subType string: %q: %w", val.v.(string), err) } subType = byte(i) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go index a242bb57cf7..0e07d505586 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go @@ -842,7 +842,7 @@ func (vr *valueReader) peekLength() (int32, error) { } idx := vr.offset - return (int32(vr.d[idx]) | int32(vr.d[idx+1])<<8 | int32(vr.d[idx+2])<<16 | int32(vr.d[idx+3])<<24), nil + return int32(binary.LittleEndian.Uint32(vr.d[idx:])), nil } func (vr *valueReader) readLength() (int32, error) { return vr.readi32() } @@ -854,7 +854,7 @@ func (vr *valueReader) readi32() (int32, error) { idx := vr.offset vr.offset += 4 - return (int32(vr.d[idx]) | int32(vr.d[idx+1])<<8 | int32(vr.d[idx+2])<<16 | int32(vr.d[idx+3])<<24), nil + return int32(binary.LittleEndian.Uint32(vr.d[idx:])), nil } func (vr *valueReader) readu32() (uint32, error) { @@ -864,7 +864,7 @@ func (vr *valueReader) readu32() (uint32, error) { idx := vr.offset vr.offset += 4 - return (uint32(vr.d[idx]) | uint32(vr.d[idx+1])<<8 | uint32(vr.d[idx+2])<<16 | uint32(vr.d[idx+3])<<24), nil + return binary.LittleEndian.Uint32(vr.d[idx:]), nil } func (vr *valueReader) readi64() (int64, error) { @@ -874,8 +874,7 @@ func (vr *valueReader) readi64() (int64, error) { idx := vr.offset vr.offset += 8 - return int64(vr.d[idx]) | int64(vr.d[idx+1])<<8 | int64(vr.d[idx+2])<<16 | int64(vr.d[idx+3])<<24 | - int64(vr.d[idx+4])<<32 | int64(vr.d[idx+5])<<40 | int64(vr.d[idx+6])<<48 | int64(vr.d[idx+7])<<56, nil + return int64(binary.LittleEndian.Uint64(vr.d[idx:])), nil } func (vr *valueReader) readu64() (uint64, error) { @@ -885,6 +884,5 @@ func (vr *valueReader) readu64() (uint64, error) { idx := vr.offset vr.offset += 8 - return uint64(vr.d[idx]) | uint64(vr.d[idx+1])<<8 | uint64(vr.d[idx+2])<<16 | uint64(vr.d[idx+3])<<24 | - uint64(vr.d[idx+4])<<32 | uint64(vr.d[idx+5])<<40 | uint64(vr.d[idx+6])<<48 | uint64(vr.d[idx+7])<<56, nil + return binary.LittleEndian.Uint64(vr.d[idx:]), nil } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go b/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go index 4d1bfb31601..a8088e1e30b 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go @@ -88,8 +88,12 @@ func (rv RawValue) UnmarshalWithRegistry(r *bsoncodec.Registry, val interface{}) return dec.DecodeValue(bsoncodec.DecodeContext{Registry: r}, vr, rval) } -// UnmarshalWithContext performs the same unmarshalling as Unmarshal but uses the provided DecodeContext -// instead of the one attached or the default registry. +// UnmarshalWithContext performs the same unmarshalling as Unmarshal but uses +// the provided DecodeContext instead of the one attached or the default +// registry. +// +// Deprecated: Use [RawValue.UnmarshalWithRegistry] with a custom registry to customize +// unmarshal behavior instead. 
func (rv RawValue) UnmarshalWithContext(dc *bsoncodec.DecodeContext, val interface{}) error { if dc == nil { return ErrNilContext diff --git a/vendor/go.mongodb.org/mongo-driver/bson/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/registry.go index b5b0f356878..d6afb2850e2 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/registry.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/registry.go @@ -10,15 +10,27 @@ import ( "go.mongodb.org/mongo-driver/bson/bsoncodec" ) -// DefaultRegistry is the default bsoncodec.Registry. It contains the default codecs and the -// primitive codecs. +// DefaultRegistry is the default bsoncodec.Registry. It contains the default +// codecs and the primitive codecs. +// +// Deprecated: Use [NewRegistry] to construct a new default registry. To use a +// custom registry when marshaling or unmarshaling, use the "SetRegistry" method +// on an [Encoder] or [Decoder] instead: +// +// dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(data)) +// if err != nil { +// panic(err) +// } +// dec.SetRegistry(reg) +// +// See [Encoder] and [Decoder] for more examples. var DefaultRegistry = NewRegistry() // NewRegistryBuilder creates a new RegistryBuilder configured with the default encoders and // decoders from the bsoncodec.DefaultValueEncoders and bsoncodec.DefaultValueDecoders types and the // PrimitiveCodecs type in this package. // -// Deprecated: Use NewRegistry instead. +// Deprecated: Use [NewRegistry] instead. func NewRegistryBuilder() *bsoncodec.RegistryBuilder { rb := bsoncodec.NewRegistryBuilder() bsoncodec.DefaultValueEncoders{}.RegisterDefaultEncoders(rb) diff --git a/vendor/go.mongodb.org/mongo-driver/internal/logger/io_sink.go b/vendor/go.mongodb.org/mongo-driver/internal/logger/io_sink.go index c5ff1474b4f..0a6c1bdcabf 100644 --- a/vendor/go.mongodb.org/mongo-driver/internal/logger/io_sink.go +++ b/vendor/go.mongodb.org/mongo-driver/internal/logger/io_sink.go @@ -9,6 +9,7 @@ package logger import ( "encoding/json" "io" + "math" "sync" "time" ) @@ -36,7 +37,11 @@ func NewIOSink(out io.Writer) *IOSink { // Info will write a JSON-encoded message to the io.Writer. func (sink *IOSink) Info(_ int, msg string, keysAndValues ...interface{}) { - kvMap := make(map[string]interface{}, len(keysAndValues)/2+2) + mapSize := len(keysAndValues) / 2 + if math.MaxInt-mapSize >= 2 { + mapSize += 2 + } + kvMap := make(map[string]interface{}, mapSize) kvMap[KeyTimestamp] = time.Now().UnixNano() kvMap[KeyMessage] = msg diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go b/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go index 1bedcc3f8a9..8d0a2031de5 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go @@ -531,6 +531,12 @@ func (cs *ChangeStream) ID() int64 { return cs.cursor.ID() } +// RemainingBatchLength returns the number of documents left in the current batch. If this returns zero, the subsequent +// call to Next or TryNext will do a network request to fetch the next batch. +func (cs *ChangeStream) RemainingBatchLength() int { + return len(cs.batch) +} + // SetBatchSize sets the number of documents to fetch from the database with // each iteration of the ChangeStream's "Next" or "TryNext" method. This setting // only affects subsequent document batches fetched from the database. 
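The change_stream.go hunk above adds ChangeStream.RemainingBatchLength to the mongo-driver. As a rough illustration only (not part of the vendored diff; the connection string, database and collection names below are placeholders), a caller can use it to drain just the events already buffered on the client before risking another server round trip:

package main

import (
	"context"
	"fmt"
	"log"

	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	ctx := context.Background()

	// Placeholder URI; change streams require a replica set or sharded cluster.
	client, err := mongo.Connect(ctx, options.Client().ApplyURI("mongodb://localhost:27017"))
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = client.Disconnect(ctx) }()

	coll := client.Database("example").Collection("events")
	cs, err := coll.Watch(ctx, mongo.Pipeline{})
	if err != nil {
		log.Fatal(err)
	}
	defer cs.Close(ctx)

	// Consume only what is already buffered client-side. Once
	// RemainingBatchLength reports zero, the next Next/TryNext call goes back
	// to the server for another batch.
	for cs.RemainingBatchLength() > 0 && cs.TryNext(ctx) {
		fmt.Println(cs.Current)
	}
}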
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/client.go b/vendor/go.mongodb.org/mongo-driver/mongo/client.go index 280749c7dd6..4266412aab9 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/client.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/client.go @@ -209,10 +209,6 @@ func NewClient(opts ...*options.ClientOptions) (*Client, error) { clientOpt.SetMaxPoolSize(defaultMaxPoolSize) } - if err != nil { - return nil, err - } - cfg, err := topology.NewConfig(clientOpt, client.clock) if err != nil { return nil, err diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/collection.go b/vendor/go.mongodb.org/mongo-driver/mongo/collection.go index 555035ff517..4cf6fd1a1a3 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/collection.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/collection.go @@ -1200,6 +1200,11 @@ func (coll *Collection) Distinct(ctx context.Context, fieldName string, filter i // For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/find/. func (coll *Collection) Find(ctx context.Context, filter interface{}, opts ...*options.FindOptions) (cur *Cursor, err error) { + + if ctx == nil { + ctx = context.Background() + } + // Omit "maxTimeMS" from operations that return a user-managed cursor to // prevent confusing "cursor not found" errors. To maintain existing // behavior for users who set "timeoutMS" with no context deadline, only @@ -1217,10 +1222,6 @@ func (coll *Collection) find( opts ...*options.FindOptions, ) (cur *Cursor, err error) { - if ctx == nil { - ctx = context.Background() - } - f, err := marshal(filter, coll.bsonOpts, coll.registry) if err != nil { return nil, err @@ -1801,8 +1802,11 @@ func (coll *Collection) Indexes() IndexView { // SearchIndexes returns a SearchIndexView instance that can be used to perform operations on the search indexes for the collection. func (coll *Collection) SearchIndexes() SearchIndexView { + c, _ := coll.Clone() // Clone() always return a nil error. + c.readConcern = nil + c.writeConcern = nil return SearchIndexView{ - coll: coll, + coll: c, } } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/database.go b/vendor/go.mongodb.org/mongo-driver/mongo/database.go index c5cda9e5bd3..57c0186eca6 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/database.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/database.go @@ -580,7 +580,7 @@ func (db *Database) getEncryptedFieldsFromServer(ctx context.Context, collection return encryptedFields, nil } -// getEncryptedFieldsFromServer tries to get an "encryptedFields" document associated with collectionName by checking the client EncryptedFieldsMap. +// getEncryptedFieldsFromMap tries to get an "encryptedFields" document associated with collectionName by checking the client EncryptedFieldsMap. // Returns nil and no error if an EncryptedFieldsMap is not configured, or does not contain an entry for collectionName. 
func (db *Database) getEncryptedFieldsFromMap(collectionName string) interface{} { // Check the EncryptedFieldsMap diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go index db56745919f..17b37313010 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go @@ -15,6 +15,7 @@ import ( "errors" "fmt" "io/ioutil" + "math" "net" "net/http" "strings" @@ -1177,7 +1178,19 @@ func addClientCertFromSeparateFiles(cfg *tls.Config, keyFile, certFile, keyPassw return "", err } - data := make([]byte, 0, len(keyData)+len(certData)+1) + keySize := len(keyData) + if keySize > 64*1024*1024 { + return "", errors.New("X.509 key must be less than 64 MiB") + } + certSize := len(certData) + if certSize > 64*1024*1024 { + return "", errors.New("X.509 certificate must be less than 64 MiB") + } + dataSize := keySize + certSize + 1 + if dataSize > math.MaxInt { + return "", errors.New("size overflow") + } + data := make([]byte, 0, dataSize) data = append(data, keyData...) data = append(data, '\n') data = append(data, certData...) diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/mongooptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/mongooptions.go index fd17ce44e16..36088c2fcbf 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/mongooptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/mongooptions.go @@ -104,7 +104,7 @@ const ( // UpdateLookup includes a delta describing the changes to the document and a copy of the entire document that // was changed. UpdateLookup FullDocument = "updateLookup" - // WhenAvailable includes a post-image of the the modified document for replace and update change events + // WhenAvailable includes a post-image of the modified document for replace and update change events // if the post-image for this event is available. WhenAvailable FullDocument = "whenAvailable" ) diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/searchindexoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/searchindexoptions.go index 9774d615ba0..8cb8a08b780 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/searchindexoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/searchindexoptions.go @@ -9,6 +9,7 @@ package options // SearchIndexesOptions represents options that can be used to configure a SearchIndexView. type SearchIndexesOptions struct { Name *string + Type *string } // SearchIndexes creates a new SearchIndexesOptions instance. @@ -22,6 +23,12 @@ func (sio *SearchIndexesOptions) SetName(name string) *SearchIndexesOptions { return sio } +// SetType sets the value for the Type field. +func (sio *SearchIndexesOptions) SetType(typ string) *SearchIndexesOptions { + sio.Type = &typ + return sio +} + // CreateSearchIndexesOptions represents options that can be used to configure a SearchIndexView.CreateOne or // SearchIndexView.CreateMany operation. 
type CreateSearchIndexesOptions struct { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/search_index_view.go b/vendor/go.mongodb.org/mongo-driver/mongo/search_index_view.go index 6a7871531e1..73fe8534ed4 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/search_index_view.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/search_index_view.go @@ -13,7 +13,6 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/mongo/options" - "go.mongodb.org/mongo-driver/mongo/writeconcern" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/operation" @@ -109,6 +108,9 @@ func (siv SearchIndexView) CreateMany( if model.Options != nil && model.Options.Name != nil { indexes = bsoncore.AppendStringElement(indexes, "name", *model.Options.Name) } + if model.Options != nil && model.Options.Type != nil { + indexes = bsoncore.AppendStringElement(indexes, "type", *model.Options.Type) + } indexes = bsoncore.AppendDocumentElement(indexes, "definition", definition) indexes, err = bsoncore.AppendDocumentEnd(indexes, iidx) @@ -134,20 +136,13 @@ func (siv SearchIndexView) CreateMany( return nil, err } - wc := siv.coll.writeConcern - if sess.TransactionRunning() { - wc = nil - } - if !writeconcern.AckWrite(wc) { - sess = nil - } - selector := makePinnedSelector(sess, siv.coll.writeSelector) op := operation.NewCreateSearchIndexes(indexes). - Session(sess).WriteConcern(wc).ClusterClock(siv.coll.client.clock). - Database(siv.coll.db.name).Collection(siv.coll.name).CommandMonitor(siv.coll.client.monitor). - Deployment(siv.coll.client.deployment).ServerSelector(selector).ServerAPI(siv.coll.client.serverAPI). + Session(sess).CommandMonitor(siv.coll.client.monitor). + ServerSelector(selector).ClusterClock(siv.coll.client.clock). + Collection(siv.coll.name).Database(siv.coll.db.name). + Deployment(siv.coll.client.deployment).ServerAPI(siv.coll.client.serverAPI). Timeout(siv.coll.client.timeout) err = op.Execute(ctx) @@ -196,20 +191,12 @@ func (siv SearchIndexView) DropOne( return err } - wc := siv.coll.writeConcern - if sess.TransactionRunning() { - wc = nil - } - if !writeconcern.AckWrite(wc) { - sess = nil - } - selector := makePinnedSelector(sess, siv.coll.writeSelector) op := operation.NewDropSearchIndex(name). - Session(sess).WriteConcern(wc).CommandMonitor(siv.coll.client.monitor). + Session(sess).CommandMonitor(siv.coll.client.monitor). ServerSelector(selector).ClusterClock(siv.coll.client.clock). - Database(siv.coll.db.name).Collection(siv.coll.name). + Collection(siv.coll.name).Database(siv.coll.db.name). Deployment(siv.coll.client.deployment).ServerAPI(siv.coll.client.serverAPI). Timeout(siv.coll.client.timeout) @@ -258,20 +245,12 @@ func (siv SearchIndexView) UpdateOne( return err } - wc := siv.coll.writeConcern - if sess.TransactionRunning() { - wc = nil - } - if !writeconcern.AckWrite(wc) { - sess = nil - } - selector := makePinnedSelector(sess, siv.coll.writeSelector) op := operation.NewUpdateSearchIndex(name, indexDefinition). - Session(sess).WriteConcern(wc).CommandMonitor(siv.coll.client.monitor). + Session(sess).CommandMonitor(siv.coll.client.monitor). ServerSelector(selector).ClusterClock(siv.coll.client.clock). - Database(siv.coll.db.name).Collection(siv.coll.name). + Collection(siv.coll.name).Database(siv.coll.db.name). Deployment(siv.coll.client.deployment).ServerAPI(siv.coll.client.serverAPI). 
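With the new `Type` field and `SetType` setter, a search index model can declare its index type, which `CreateMany` now forwards into the `createSearchIndexes` command. A small usage sketch assuming the mongo-driver v1.16.0 API shown in this diff; the index name, definition, and `"search"` type value are examples only:

```go
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	// Build a search index model that carries the new Type option alongside
	// the index name and definition.
	model := mongo.SearchIndexModel{
		Definition: bson.D{{Key: "mappings", Value: bson.D{{Key: "dynamic", Value: true}}}},
		Options:    options.SearchIndexes().SetName("default").SetType("search"),
	}
	fmt.Printf("index %q, type %q\n", *model.Options.Name, *model.Options.Type)

	// With a connected collection the model would be submitted as:
	//   name, err := coll.SearchIndexes().CreateOne(ctx, model)
}
```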
Timeout(siv.coll.client.timeout) diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/writeconcern/writeconcern.go b/vendor/go.mongodb.org/mongo-driver/mongo/writeconcern/writeconcern.go index 1d9472ec0b0..7a73d8d72f9 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/writeconcern/writeconcern.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/writeconcern/writeconcern.go @@ -51,7 +51,7 @@ var ErrNegativeWTimeout = errors.New("write concern `wtimeout` field cannot be n type WriteConcern struct { // W requests acknowledgment that the write operation has propagated to a // specified number of mongod instances or to mongod instances with - // specified tags. It sets the the "w" option in a MongoDB write concern. + // specified tags. It sets the "w" option in a MongoDB write concern. // // W values must be a string or an int. // diff --git a/vendor/go.mongodb.org/mongo-driver/version/version.go b/vendor/go.mongodb.org/mongo-driver/version/version.go index 879bbdb7a41..659d48d7a82 100644 --- a/vendor/go.mongodb.org/mongo-driver/version/version.go +++ b/vendor/go.mongodb.org/mongo-driver/version/version.go @@ -8,4 +8,4 @@ package version // import "go.mongodb.org/mongo-driver/version" // Driver is the current version of the driver. -var Driver = "v1.15.0" +var Driver = "1.16.0" diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go index 88133293ea7..03925d7ada1 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go @@ -8,6 +8,7 @@ package bsoncore // import "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" import ( "bytes" + "encoding/binary" "fmt" "math" "strconv" @@ -706,17 +707,16 @@ func ReserveLength(dst []byte) (int32, []byte) { // UpdateLength updates the length at index with length and returns the []byte. func UpdateLength(dst []byte, index, length int32) []byte { - dst[index] = byte(length) - dst[index+1] = byte(length >> 8) - dst[index+2] = byte(length >> 16) - dst[index+3] = byte(length >> 24) + binary.LittleEndian.PutUint32(dst[index:], uint32(length)) return dst } func appendLength(dst []byte, l int32) []byte { return appendi32(dst, l) } func appendi32(dst []byte, i32 int32) []byte { - return append(dst, byte(i32), byte(i32>>8), byte(i32>>16), byte(i32>>24)) + b := []byte{0, 0, 0, 0} + binary.LittleEndian.PutUint32(b, uint32(i32)) + return append(dst, b...) } // ReadLength reads an int32 length from src and returns the length and the remaining bytes. If @@ -734,27 +734,26 @@ func readi32(src []byte) (int32, []byte, bool) { if len(src) < 4 { return 0, src, false } - return (int32(src[0]) | int32(src[1])<<8 | int32(src[2])<<16 | int32(src[3])<<24), src[4:], true + return int32(binary.LittleEndian.Uint32(src)), src[4:], true } func appendi64(dst []byte, i64 int64) []byte { - return append(dst, - byte(i64), byte(i64>>8), byte(i64>>16), byte(i64>>24), - byte(i64>>32), byte(i64>>40), byte(i64>>48), byte(i64>>56), - ) + b := []byte{0, 0, 0, 0, 0, 0, 0, 0} + binary.LittleEndian.PutUint64(b, uint64(i64)) + return append(dst, b...) 
} func readi64(src []byte) (int64, []byte, bool) { if len(src) < 8 { return 0, src, false } - i64 := (int64(src[0]) | int64(src[1])<<8 | int64(src[2])<<16 | int64(src[3])<<24 | - int64(src[4])<<32 | int64(src[5])<<40 | int64(src[6])<<48 | int64(src[7])<<56) - return i64, src[8:], true + return int64(binary.LittleEndian.Uint64(src)), src[8:], true } func appendu32(dst []byte, u32 uint32) []byte { - return append(dst, byte(u32), byte(u32>>8), byte(u32>>16), byte(u32>>24)) + b := []byte{0, 0, 0, 0} + binary.LittleEndian.PutUint32(b, u32) + return append(dst, b...) } func readu32(src []byte) (uint32, []byte, bool) { @@ -762,23 +761,20 @@ func readu32(src []byte) (uint32, []byte, bool) { return 0, src, false } - return (uint32(src[0]) | uint32(src[1])<<8 | uint32(src[2])<<16 | uint32(src[3])<<24), src[4:], true + return binary.LittleEndian.Uint32(src), src[4:], true } func appendu64(dst []byte, u64 uint64) []byte { - return append(dst, - byte(u64), byte(u64>>8), byte(u64>>16), byte(u64>>24), - byte(u64>>32), byte(u64>>40), byte(u64>>48), byte(u64>>56), - ) + b := []byte{0, 0, 0, 0, 0, 0, 0, 0} + binary.LittleEndian.PutUint64(b, u64) + return append(dst, b...) } func readu64(src []byte) (uint64, []byte, bool) { if len(src) < 8 { return 0, src, false } - u64 := (uint64(src[0]) | uint64(src[1])<<8 | uint64(src[2])<<16 | uint64(src[3])<<24 | - uint64(src[4])<<32 | uint64(src[5])<<40 | uint64(src[6])<<48 | uint64(src[7])<<56) - return u64, src[8:], true + return binary.LittleEndian.Uint64(src), src[8:], true } // keep in sync with readcstringbytes diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go index 6837b53fc5b..f68e1da1a09 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go @@ -4,10 +4,18 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Package bsoncore contains functions that can be used to encode and decode BSON -// elements and values to or from a slice of bytes. These functions are aimed at -// allowing low level manipulation of BSON and can be used to build a higher -// level BSON library. +// Package bsoncore is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! +// +// Package bsoncore contains functions that can be used to encode and decode +// BSON elements and values to or from a slice of bytes. These functions are +// aimed at allowing low level manipulation of BSON and can be used to build a +// higher level BSON library. // // The Read* functions within this package return the values of the element and // a boolean indicating if the values are valid. A boolean was used instead of @@ -15,15 +23,12 @@ // enough bytes. This library attempts to do no validation, it will only return // false if there are not enough bytes for an item to be read. For example, the // ReadDocument function checks the length, if that length is larger than the -// number of bytes available, it will return false, if there are enough bytes, it -// will return those bytes and true. 
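The bsoncore numeric helpers were switched from hand-written byte shifts to `encoding/binary` in little-endian order, which is behavior-preserving. A short sketch demonstrating that the two encodings produce identical bytes; the helper names here are illustrative:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// shifted is the old hand-rolled little-endian encoding.
func shifted(dst []byte, i32 int32) []byte {
	return append(dst, byte(i32), byte(i32>>8), byte(i32>>16), byte(i32>>24))
}

// viaBinary mirrors the refactored helper: same bytes, clearer intent.
func viaBinary(dst []byte, i32 int32) []byte {
	b := []byte{0, 0, 0, 0}
	binary.LittleEndian.PutUint32(b, uint32(i32))
	return append(dst, b...)
}

func main() {
	const v = int32(-123456789)
	a, b := shifted(nil, v), viaBinary(nil, v)
	fmt.Println(bytes.Equal(a, b))                         // true: identical encodings
	fmt.Println(int32(binary.LittleEndian.Uint32(a)) == v) // true: round-trips
}
```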
It is the consumers responsibility to +// number of bytes available, it will return false, if there are enough bytes, +// it will return those bytes and true. It is the consumers responsibility to // validate those bytes. // // The Append* functions within this package will append the type value to the // given dst slice. If the slice has enough capacity, it will not grow the // slice. The Append*Element functions within this package operate in the same // way, but additionally append the BSON type and the key before the value. -// -// Warning: Package bsoncore is unstable and there is no backward compatibility -// guarantee. It is experimental and subject to change. package bsoncore diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/DESIGN.md b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/DESIGN.md deleted file mode 100644 index 3c3e6c56cd0..00000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/DESIGN.md +++ /dev/null @@ -1,27 +0,0 @@ -# Driver Library Design - -This document outlines the design for this package. - -## Deployment, Server, and Connection - -Acquiring a `Connection` from a `Server` selected from a `Deployment` enables sending and receiving -wire messages. A `Deployment` represents an set of MongoDB servers and a `Server` represents a -member of that set. These three types form the operation execution stack. - -### Compression - -Compression is handled by Connection type while uncompression is handled automatically by the -Operation type. This is done because the compressor to use for compressing a wire message is -chosen by the connection during handshake, while uncompression can be performed without this -information. This does make the design of compression non-symmetric, but it makes the design simpler -to implement and more consistent. - -## Operation - -The `Operation` type handles executing a series of commands using a `Deployment`. For most uses -`Operation` will only execute a single command, but the main use case for a series of commands is -batch split write commands, such as insert. The type itself is heavily documented, so reading the -code and comments together should provide an understanding of how the type works. - -This type is not meant to be used directly by callers. Instead a wrapping type should be defined -using the IDL. diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/doc.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/doc.go new file mode 100644 index 00000000000..99c4c3470f2 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/doc.go @@ -0,0 +1,14 @@ +// Copyright (C) MongoDB, Inc. 2024-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package creds is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! 
+package creds diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/doc.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/doc.go index 9db65cf193c..5f9f1f57430 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/doc.go @@ -4,20 +4,11 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Package auth is not for public use. +// Package auth is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. // -// The API for packages in the 'private' directory have no stability -// guarantee. -// -// The packages within the 'private' directory would normally be put into an -// 'internal' directory to prohibit their use outside the 'mongo' directory. -// However, some MongoDB tools require very low-level access to the building -// blocks of a driver, so we have placed them under 'private' to allow these -// packages to be imported by projects that need them. -// -// These package APIs may be modified in backwards-incompatible ways at any -// time. -// -// You are strongly discouraged from directly using any packages -// under 'private'. +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! package auth diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/compression.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/compression.go index d79b024b74d..d9a6c68feed 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/compression.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/compression.go @@ -30,7 +30,11 @@ type CompressionOpts struct { // destination writer. It panics on any errors and should only be used at // package initialization time. func mustZstdNewWriter(lvl zstd.EncoderLevel) *zstd.Encoder { - enc, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(lvl)) + enc, err := zstd.NewWriter( + nil, + zstd.WithWindowSize(8<<20), // Set window size to 8MB. 
+ zstd.WithEncoderLevel(lvl), + ) if err != nil { panic(err) } @@ -105,6 +109,13 @@ func (e *zlibEncoder) Encode(dst, src []byte) ([]byte, error) { return dst, nil } +var zstdBufPool = sync.Pool{ + New: func() interface{} { + s := make([]byte, 0) + return &s + }, +} + // CompressPayload takes a byte slice and compresses it according to the options passed func CompressPayload(in []byte, opts CompressionOpts) ([]byte, error) { switch opts.Compressor { @@ -123,7 +134,13 @@ func CompressPayload(in []byte, opts CompressionOpts) ([]byte, error) { if err != nil { return nil, err } - return encoder.EncodeAll(in, nil), nil + ptr := zstdBufPool.Get().(*[]byte) + b := encoder.EncodeAll(in, *ptr) + dst := make([]byte, len(b)) + copy(dst, b) + *ptr = b[:0] + zstdBufPool.Put(ptr) + return dst, nil default: return nil, fmt.Errorf("unknown compressor ID %v", opts.Compressor) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/connstring/connstring.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/connstring/connstring.go index 52068b8ea83..686458e2929 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/connstring/connstring.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/connstring/connstring.go @@ -4,6 +4,13 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// Package connstring is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! package connstring // import "go.mongodb.org/mongo-driver/x/mongo/driver/connstring" import ( diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/dns/dns.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/dns/dns.go index 6573a4c1ade..9334d493ed9 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/dns/dns.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/dns/dns.go @@ -4,6 +4,13 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// Package dns is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! package dns import ( diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/driver.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/driver.go index 5fd3ddcb42d..900729bf874 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/driver.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/driver.go @@ -4,6 +4,13 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// Package driver is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. 
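`CompressPayload` now reuses zstd destination buffers through a `sync.Pool` and copies the compressed bytes out before returning the buffer, so callers never alias pooled memory. A simplified sketch of that pooling pattern, with a plain `append` standing in for `encoder.EncodeAll`:

```go
package main

import (
	"fmt"
	"sync"
)

// bufPool hands out reusable scratch slices; pointers are stored so Put does
// not allocate a fresh interface value for the slice header each time.
var bufPool = sync.Pool{
	New: func() interface{} {
		s := make([]byte, 0)
		return &s
	},
}

// process appends into a pooled buffer, copies the result out, then resets
// and returns the buffer, so callers never hold pooled memory.
func process(in []byte) []byte {
	ptr := bufPool.Get().(*[]byte)
	b := append(*ptr, in...) // stand-in for encoder.EncodeAll(in, *ptr)
	out := make([]byte, len(b))
	copy(out, b)
	*ptr = b[:0]
	bufPool.Put(ptr)
	return out
}

func main() {
	fmt.Printf("%q\n", process([]byte("payload"))) // "payload"
}
```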
+// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! package driver // import "go.mongodb.org/mongo-driver/x/mongo/driver" import ( diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_not_enabled.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_not_enabled.go index 24f9f9b0ec3..80f500085cb 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_not_enabled.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_not_enabled.go @@ -7,6 +7,13 @@ //go:build !cse // +build !cse +// Package mongocrypt is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! package mongocrypt import ( diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/doc.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/doc.go new file mode 100644 index 00000000000..e0cc77052a8 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/doc.go @@ -0,0 +1,14 @@ +// Copyright (C) MongoDB, Inc. 2024-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package options is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! +package options diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/ocsp.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/ocsp.go index 8700728729a..2bff94a659b 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/ocsp.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/ocsp.go @@ -4,6 +4,13 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// Package ocsp is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! 
package ocsp import ( diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation.go index eb1acec88f3..db5367bed5d 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation.go @@ -622,7 +622,7 @@ func (op Operation) Execute(ctx context.Context) error { } }() for { - // If we're starting a retry and the the error from the previous try was + // If we're starting a retry and the error from the previous try was // a context canceled or deadline exceeded error, stop retrying and // return that error. if errors.Is(prevErr, context.Canceled) || errors.Is(prevErr, context.DeadlineExceeded) { @@ -1574,11 +1574,17 @@ func (op Operation) addClusterTime(dst []byte, desc description.SelectedServer) // operation's MaxTimeMS if set. If no MaxTimeMS is set on the operation, and context is // not a Timeout context, calculateMaxTimeMS returns 0. func (op Operation) calculateMaxTimeMS(ctx context.Context, mon RTTMonitor) (uint64, error) { - if csot.IsTimeoutContext(ctx) { - if op.OmitCSOTMaxTimeMS { - return 0, nil - } - + // If CSOT is enabled and we're not omitting the CSOT-calculated maxTimeMS + // value, then calculate maxTimeMS. + // + // This allows commands that do not currently send CSOT-calculated maxTimeMS + // (e.g. Find and Aggregate) to still use a manually-provided maxTimeMS + // value. + // + // TODO(GODRIVER-2944): Remove or refactor this logic when we add the + // "timeoutMode" option, which will allow users to opt-in to the + // CSOT-calculated maxTimeMS values if that's the behavior they want. + if csot.IsTimeoutContext(ctx) && !op.OmitCSOTMaxTimeMS { if deadline, ok := ctx.Deadline(); ok { remainingTimeout := time.Until(deadline) rtt90 := mon.P90() @@ -1893,7 +1899,6 @@ func (op Operation) decodeResult(ctx context.Context, opcode wiremessage.OpCode, return nil, errors.New("malformed wire message: insufficient bytes to read single document") } case wiremessage.DocumentSequence: - // TODO(GODRIVER-617): Implement document sequence returns. _, _, wm, ok = wiremessage.ReadMsgSectionDocumentSequence(wm) if !ok { return nil, errors.New("malformed wire message: insufficient bytes to read document sequence") @@ -1989,7 +1994,7 @@ func (op Operation) publishStartedEvent(ctx context.Context, info startedInforma } } -// canPublishSucceededEvent returns true if a CommandSucceededEvent can be +// canPublishFinishedEvent returns true if a CommandSucceededEvent can be // published for the given command. This is true if the command is not an // unacknowledged write and the command monitor is monitoring succeeded events. 
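`calculateMaxTimeMS` now derives a CSOT-based `maxTimeMS` whenever a timeout context is used and `OmitCSOTMaxTimeMS` is not set. The sketch below is a simplified approximation of that calculation (remaining deadline minus the observed 90th-percentile RTT); the real method applies additional validation and rounding not reproduced here:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// maxTimeMS approximates the CSOT-derived value: remaining context deadline
// minus the observed 90th-percentile RTT, in whole milliseconds.
func maxTimeMS(ctx context.Context, rtt90 time.Duration) (uint64, error) {
	deadline, ok := ctx.Deadline()
	if !ok {
		return 0, nil // no deadline: send no maxTimeMS
	}
	remaining := time.Until(deadline) - rtt90
	if remaining <= 0 {
		return 0, fmt.Errorf("remaining timeout is less than the 90th percentile RTT %v", rtt90)
	}
	return uint64(remaining / time.Millisecond), nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
	defer cancel()
	ms, err := maxTimeMS(ctx, 20*time.Millisecond)
	fmt.Println(ms, err) // roughly 479 <nil>
}
```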
func (op Operation) canPublishFinishedEvent(info finishedInformation) bool { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create_search_indexes.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create_search_indexes.go index a16f9d716bb..cb0d8079527 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create_search_indexes.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create_search_indexes.go @@ -15,7 +15,6 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/event" "go.mongodb.org/mongo-driver/mongo/description" - "go.mongodb.org/mongo-driver/mongo/writeconcern" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/session" @@ -23,19 +22,18 @@ import ( // CreateSearchIndexes performs a createSearchIndexes operation. type CreateSearchIndexes struct { - indexes bsoncore.Document - session *session.Client - clock *session.ClusterClock - collection string - monitor *event.CommandMonitor - crypt driver.Crypt - database string - deployment driver.Deployment - selector description.ServerSelector - writeConcern *writeconcern.WriteConcern - result CreateSearchIndexesResult - serverAPI *driver.ServerAPIOptions - timeout *time.Duration + indexes bsoncore.Document + session *session.Client + clock *session.ClusterClock + collection string + monitor *event.CommandMonitor + crypt driver.Crypt + database string + deployment driver.Deployment + selector description.ServerSelector + result CreateSearchIndexesResult + serverAPI *driver.ServerAPIOptions + timeout *time.Duration } // CreateSearchIndexResult represents a single search index result in CreateSearchIndexesResult. @@ -109,9 +107,15 @@ func (csi *CreateSearchIndexes) Execute(ctx context.Context) error { return driver.Operation{ CommandFn: csi.command, ProcessResponseFn: csi.processResponse, + Client: csi.session, + Clock: csi.clock, CommandMonitor: csi.monitor, + Crypt: csi.crypt, Database: csi.database, Deployment: csi.deployment, + Selector: csi.selector, + ServerAPI: csi.serverAPI, + Timeout: csi.timeout, }.Execute(ctx) } @@ -214,16 +218,6 @@ func (csi *CreateSearchIndexes) ServerSelector(selector description.ServerSelect return csi } -// WriteConcern sets the write concern for this operation. -func (csi *CreateSearchIndexes) WriteConcern(writeConcern *writeconcern.WriteConcern) *CreateSearchIndexes { - if csi == nil { - csi = new(CreateSearchIndexes) - } - - csi.writeConcern = writeConcern - return csi -} - // ServerAPI sets the server API version for this operation. func (csi *CreateSearchIndexes) ServerAPI(serverAPI *driver.ServerAPIOptions) *CreateSearchIndexes { if csi == nil { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/doc.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/doc.go new file mode 100644 index 00000000000..e55b12a7488 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/doc.go @@ -0,0 +1,14 @@ +// Copyright (C) MongoDB, Inc. 2024-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package operation is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. 
The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! +package operation diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_search_index.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_search_index.go index 25cde8154b0..3992c831656 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_search_index.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_search_index.go @@ -14,7 +14,6 @@ import ( "go.mongodb.org/mongo-driver/event" "go.mongodb.org/mongo-driver/mongo/description" - "go.mongodb.org/mongo-driver/mongo/writeconcern" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/session" @@ -22,19 +21,18 @@ import ( // DropSearchIndex performs an dropSearchIndex operation. type DropSearchIndex struct { - index string - session *session.Client - clock *session.ClusterClock - collection string - monitor *event.CommandMonitor - crypt driver.Crypt - database string - deployment driver.Deployment - selector description.ServerSelector - writeConcern *writeconcern.WriteConcern - result DropSearchIndexResult - serverAPI *driver.ServerAPIOptions - timeout *time.Duration + index string + session *session.Client + clock *session.ClusterClock + collection string + monitor *event.CommandMonitor + crypt driver.Crypt + database string + deployment driver.Deployment + selector description.ServerSelector + result DropSearchIndexResult + serverAPI *driver.ServerAPIOptions + timeout *time.Duration } // DropSearchIndexResult represents a dropSearchIndex result returned by the server. @@ -93,7 +91,6 @@ func (dsi *DropSearchIndex) Execute(ctx context.Context) error { Database: dsi.database, Deployment: dsi.deployment, Selector: dsi.selector, - WriteConcern: dsi.writeConcern, ServerAPI: dsi.serverAPI, Timeout: dsi.timeout, }.Execute(ctx) @@ -196,16 +193,6 @@ func (dsi *DropSearchIndex) ServerSelector(selector description.ServerSelector) return dsi } -// WriteConcern sets the write concern for this operation. -func (dsi *DropSearchIndex) WriteConcern(writeConcern *writeconcern.WriteConcern) *DropSearchIndex { - if dsi == nil { - dsi = new(DropSearchIndex) - } - - dsi.writeConcern = writeConcern - return dsi -} - // ServerAPI sets the server API version for this operation. func (dsi *DropSearchIndex) ServerAPI(serverAPI *driver.ServerAPIOptions) *DropSearchIndex { if dsi == nil { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update_search_index.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update_search_index.go index ba807986c90..64f2da7f6fe 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update_search_index.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update_search_index.go @@ -14,7 +14,6 @@ import ( "go.mongodb.org/mongo-driver/event" "go.mongodb.org/mongo-driver/mongo/description" - "go.mongodb.org/mongo-driver/mongo/writeconcern" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/session" @@ -22,20 +21,19 @@ import ( // UpdateSearchIndex performs a updateSearchIndex operation. 
type UpdateSearchIndex struct { - index string - definition bsoncore.Document - session *session.Client - clock *session.ClusterClock - collection string - monitor *event.CommandMonitor - crypt driver.Crypt - database string - deployment driver.Deployment - selector description.ServerSelector - writeConcern *writeconcern.WriteConcern - result UpdateSearchIndexResult - serverAPI *driver.ServerAPIOptions - timeout *time.Duration + index string + definition bsoncore.Document + session *session.Client + clock *session.ClusterClock + collection string + monitor *event.CommandMonitor + crypt driver.Crypt + database string + deployment driver.Deployment + selector description.ServerSelector + result UpdateSearchIndexResult + serverAPI *driver.ServerAPIOptions + timeout *time.Duration } // UpdateSearchIndexResult represents a single index in the updateSearchIndexResult result. @@ -95,7 +93,6 @@ func (usi *UpdateSearchIndex) Execute(ctx context.Context) error { Database: usi.database, Deployment: usi.deployment, Selector: usi.selector, - WriteConcern: usi.writeConcern, ServerAPI: usi.serverAPI, Timeout: usi.timeout, }.Execute(ctx) @@ -209,16 +206,6 @@ func (usi *UpdateSearchIndex) ServerSelector(selector description.ServerSelector return usi } -// WriteConcern sets the write concern for this operation. -func (usi *UpdateSearchIndex) WriteConcern(writeConcern *writeconcern.WriteConcern) *UpdateSearchIndex { - if usi == nil { - usi = new(UpdateSearchIndex) - } - - usi.writeConcern = writeConcern - return usi -} - // ServerAPI sets the server API version for this operation. func (usi *UpdateSearchIndex) ServerAPI(serverAPI *driver.ServerAPIOptions) *UpdateSearchIndex { if usi == nil { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/doc.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/doc.go new file mode 100644 index 00000000000..80b2ac2dd5e --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/doc.go @@ -0,0 +1,14 @@ +// Copyright (C) MongoDB, Inc. 2024-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package session is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! +package session diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection.go index 476459e8e6e..649e87b3d19 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection.go @@ -320,7 +320,7 @@ func transformNetworkError(ctx context.Context, originalError error, contextDead // If there was an error and the context was cancelled, we assume it happened due to the cancellation. 
if errors.Is(ctx.Err(), context.Canceled) { - return context.Canceled + return ctx.Err() } // If there was a timeout error and the context deadline was used, we convert the error into @@ -329,7 +329,7 @@ func transformNetworkError(ctx context.Context, originalError error, contextDead return originalError } if netErr, ok := originalError.(net.Error); ok && netErr.Timeout() { - return context.DeadlineExceeded + return fmt.Errorf("%w: %s", context.DeadlineExceeded, originalError.Error()) } return originalError diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/rtt_monitor.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/rtt_monitor.go index 07f508caaee..c7b168dc2c4 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/rtt_monitor.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/rtt_monitor.go @@ -56,7 +56,6 @@ type rttMonitor struct { cfg *rttConfig ctx context.Context cancelFn context.CancelFunc - started bool } var _ driver.RTTMonitor = &rttMonitor{} @@ -83,7 +82,6 @@ func (r *rttMonitor) connect() { r.connMu.Lock() defer r.connMu.Unlock() - r.started = true r.closeWg.Add(1) go func() { @@ -97,10 +95,6 @@ func (r *rttMonitor) disconnect() { r.connMu.Lock() defer r.connMu.Unlock() - if !r.started { - return - } - r.cancelFn() // Wait for the existing connection to complete. diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server.go index f4c6d744aab..99f8dd618ba 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server.go @@ -125,6 +125,7 @@ type Server struct { processErrorLock sync.Mutex rttMonitor *rttMonitor + monitorOnce sync.Once } // updateTopologyCallback is a callback used to create a server that should be called when the parent Topology instance @@ -285,10 +286,10 @@ func (s *Server) Disconnect(ctx context.Context) error { close(s.done) s.cancelCheck() - s.rttMonitor.disconnect() s.pool.close(ctx) s.closewg.Wait() + s.rttMonitor.disconnect() atomic.StoreInt64(&s.state, serverDisconnected) return nil @@ -661,8 +662,8 @@ func (s *Server) update() { transitionedFromNetworkError := desc.LastError != nil && unwrapConnectionError(desc.LastError) != nil && previousDescription.Kind != description.Unknown - if isStreamingEnabled(s) && isStreamable(s) && !s.rttMonitor.started { - s.rttMonitor.connect() + if isStreamingEnabled(s) && isStreamable(s) { + s.monitorOnce.Do(s.rttMonitor.connect) } if isStreamable(s) || connectionIsStreaming || transitionedFromNetworkError { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology.go index 3dbbcfb860d..0fb913d21b2 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology.go @@ -4,10 +4,19 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Package topology contains types that handles the discovery, monitoring, and selection -// of servers. This package is designed to expose enough inner workings of service discovery -// and monitoring to allow low level applications to have fine grained control, while hiding -// most of the detailed implementation of the algorithms. 
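`transformNetworkError` now wraps `context.DeadlineExceeded` with `%w` instead of replacing the original error, so callers using `errors.Is` still match while the network error text is preserved. A quick illustration; the network error string below is made up:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

func main() {
	// Wrapping with %w keeps errors.Is matching context.DeadlineExceeded
	// while the original error text remains available for diagnostics.
	original := errors.New("read tcp 10.0.0.1:27017: i/o timeout")
	wrapped := fmt.Errorf("%w: %s", context.DeadlineExceeded, original.Error())

	fmt.Println(errors.Is(wrapped, context.DeadlineExceeded)) // true
	fmt.Println(wrapped) // context deadline exceeded: read tcp 10.0.0.1:27017: i/o timeout
}
```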
+// Package topology is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! +// +// Package topology contains types that handles the discovery, monitoring, and +// selection of servers. This package is designed to expose enough inner +// workings of service discovery and monitoring to allow low level applications +// to have fine grained control, while hiding most of the detailed +// implementation of the algorithms. package topology // import "go.mongodb.org/mongo-driver/x/mongo/driver/topology" import ( diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage/wiremessage.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage/wiremessage.go index abf09c15bd4..2199f855ba6 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage/wiremessage.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage/wiremessage.go @@ -4,10 +4,18 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// Package wiremessage is intended for internal use only. It is made available +// to facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! package wiremessage import ( "bytes" + "encoding/binary" "strings" "sync/atomic" @@ -231,10 +239,11 @@ func ReadHeader(src []byte) (length, requestID, responseTo int32, opcode OpCode, if len(src) < 16 { return 0, 0, 0, 0, src, false } - length = (int32(src[0]) | int32(src[1])<<8 | int32(src[2])<<16 | int32(src[3])<<24) - requestID = (int32(src[4]) | int32(src[5])<<8 | int32(src[6])<<16 | int32(src[7])<<24) - responseTo = (int32(src[8]) | int32(src[9])<<8 | int32(src[10])<<16 | int32(src[11])<<24) - opcode = OpCode(int32(src[12]) | int32(src[13])<<8 | int32(src[14])<<16 | int32(src[15])<<24) + + length = readi32unsafe(src) + requestID = readi32unsafe(src[4:]) + responseTo = readi32unsafe(src[8:]) + opcode = OpCode(readi32unsafe(src[12:])) return length, requestID, responseTo, opcode, src[16:], true } @@ -486,7 +495,7 @@ func ReadReplyCursorID(src []byte) (cursorID int64, rem []byte, ok bool) { return readi64(src) } -// ReadReplyStartingFrom reads the starting from from src. +// ReadReplyStartingFrom reads the starting from src. func ReadReplyStartingFrom(src []byte) (startingFrom int32, rem []byte, ok bool) { return readi32(src) } @@ -570,12 +579,16 @@ func ReadKillCursorsCursorIDs(src []byte, numIDs int32) (cursorIDs []int64, rem return cursorIDs, src, true } -func appendi32(dst []byte, i32 int32) []byte { - return append(dst, byte(i32), byte(i32>>8), byte(i32>>16), byte(i32>>24)) +func appendi32(dst []byte, x int32) []byte { + b := []byte{0, 0, 0, 0} + binary.LittleEndian.PutUint32(b, uint32(x)) + return append(dst, b...) 
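`ReadHeader` now decodes the 16-byte wire message header via `readi32unsafe`, i.e. `binary.LittleEndian`, rather than manual shifts. A self-contained sketch of the same header layout; the sample values (length 36, opcode 2013 for OP_MSG) are illustrative:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// header is the 16-byte MongoDB wire message header layout.
type header struct {
	Length, RequestID, ResponseTo, OpCode int32
}

func readHeader(src []byte) (header, []byte, bool) {
	if len(src) < 16 {
		return header{}, src, false
	}
	le := binary.LittleEndian
	h := header{
		Length:     int32(le.Uint32(src)),
		RequestID:  int32(le.Uint32(src[4:])),
		ResponseTo: int32(le.Uint32(src[8:])),
		OpCode:     int32(le.Uint32(src[12:])),
	}
	return h, src[16:], true
}

func main() {
	buf := make([]byte, 16)
	binary.LittleEndian.PutUint32(buf[0:], 36)    // total message length
	binary.LittleEndian.PutUint32(buf[4:], 1)     // requestID
	binary.LittleEndian.PutUint32(buf[8:], 0)     // responseTo
	binary.LittleEndian.PutUint32(buf[12:], 2013) // OP_MSG opcode
	h, _, ok := readHeader(buf)
	fmt.Println(ok, h) // true {36 1 0 2013}
}
```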
} -func appendi64(b []byte, i int64) []byte { - return append(b, byte(i), byte(i>>8), byte(i>>16), byte(i>>24), byte(i>>32), byte(i>>40), byte(i>>48), byte(i>>56)) +func appendi64(dst []byte, x int64) []byte { + b := []byte{0, 0, 0, 0, 0, 0, 0, 0} + binary.LittleEndian.PutUint64(b, uint64(x)) + return append(dst, b...) } func appendCString(b []byte, str string) []byte { @@ -587,21 +600,18 @@ func readi32(src []byte) (int32, []byte, bool) { if len(src) < 4 { return 0, src, false } - - return (int32(src[0]) | int32(src[1])<<8 | int32(src[2])<<16 | int32(src[3])<<24), src[4:], true + return readi32unsafe(src), src[4:], true } func readi32unsafe(src []byte) int32 { - return (int32(src[0]) | int32(src[1])<<8 | int32(src[2])<<16 | int32(src[3])<<24) + return int32(binary.LittleEndian.Uint32(src)) } func readi64(src []byte) (int64, []byte, bool) { if len(src) < 8 { return 0, src, false } - i64 := (int64(src[0]) | int64(src[1])<<8 | int64(src[2])<<16 | int64(src[3])<<24 | - int64(src[4])<<32 | int64(src[5])<<40 | int64(src[6])<<48 | int64(src[7])<<56) - return i64, src[8:], true + return int64(binary.LittleEndian.Uint64(src)), src[8:], true } func readcstring(src []byte) (string, []byte, bool) { diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore index 120b63a9c7d..6bf3abc41e7 100644 --- a/vendor/go.opentelemetry.io/otel/.codespellignore +++ b/vendor/go.opentelemetry.io/otel/.codespellignore @@ -5,3 +5,5 @@ collison consequentially ans nam +valu +thirdparty diff --git a/vendor/go.opentelemetry.io/otel/.codespellrc b/vendor/go.opentelemetry.io/otel/.codespellrc index 4afbb1fb3bd..e2cb3ea944b 100644 --- a/vendor/go.opentelemetry.io/otel/.codespellrc +++ b/vendor/go.opentelemetry.io/otel/.codespellrc @@ -5,6 +5,6 @@ check-filenames = check-hidden = ignore-words = .codespellignore interactive = 1 -skip = .git,go.mod,go.sum,semconv,venv,.tools +skip = .git,go.mod,go.sum,go.work,go.work.sum,semconv,venv,.tools uri-ignore-words-list = * write = diff --git a/vendor/go.opentelemetry.io/otel/.gitmodules b/vendor/go.opentelemetry.io/otel/.gitmodules deleted file mode 100644 index 38a1f56982b..00000000000 --- a/vendor/go.opentelemetry.io/otel/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "opentelemetry-proto"] - path = exporters/otlp/internal/opentelemetry-proto - url = https://github.com/open-telemetry/opentelemetry-proto diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index a62511f382e..6d9c8b64958 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -11,6 +11,7 @@ linters: enable: - depguard - errcheck + - errorlint - godot - gofumpt - goimports @@ -21,8 +22,11 @@ linters: - misspell - revive - staticcheck + - tenv - typecheck + - unconvert - unused + - unparam issues: # Maximum issues count per one linter. @@ -124,6 +128,8 @@ linters-settings: - "**/example/**/*.go" - "**/trace/*.go" - "**/trace/**/*.go" + - "**/log/*.go" + - "**/log/**/*.go" deny: - pkg: "go.opentelemetry.io/otel/internal$" desc: Do not use cross-module internal packages. 
diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index cb28b36b995..c01e6998e0b 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,83 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] +## [1.28.0/0.50.0/0.4.0] 2024-07-02 + +### Added + +- The `IsEmpty` method is added to the `Instrument` type in `go.opentelemetry.io/otel/sdk/metric`. + This method is used to check if an `Instrument` instance is a zero-value. (#5431) +- Store and provide the emitted `context.Context` in `ScopeRecords` of `go.opentelemetry.io/otel/sdk/log/logtest`. (#5468) +- The `go.opentelemetry.io/otel/semconv/v1.26.0` package. + The package contains semantic conventions from the `v1.26.0` version of the OpenTelemetry Semantic Conventions. (#5476) +- The `AssertRecordEqual` method to `go.opentelemetry.io/otel/log/logtest` to allow comparison of two log records in tests. (#5499) +- The `WithHeaders` option to `go.opentelemetry.io/otel/exporters/zipkin` to allow configuring custom http headers while exporting spans. (#5530) + +### Changed + +- `Tracer.Start` in `go.opentelemetry.io/otel/trace/noop` no longer allocates a span for empty span context. (#5457) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/otel-collector`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/zipkin`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/exporters/zipkin`. (#5490) + - The exporter no longer exports the deprecated "otel.library.name" or "otel.library.version" attributes. +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/resource`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/trace`. (#5490) +- `SimpleProcessor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` no longer allocates a slice which makes it possible to have a zero-allocation log processing using `SimpleProcessor`. (#5493) +- Use non-generic functions in the `Start` method of `"go.opentelemetry.io/otel/sdk/trace".Trace` to reduce memory allocation. (#5497) +- `service.instance.id` is populated for a `Resource` created with `"go.opentelemetry.io/otel/sdk/resource".Default` with a default value when `OTEL_GO_X_RESOURCE` is set. (#5520) +- Improve performance of metric instruments in `go.opentelemetry.io/otel/sdk/metric` by removing unnecessary calls to `time.Now`. (#5545) + +### Fixed + +- Log a warning to the OpenTelemetry internal logger when a `Record` in `go.opentelemetry.io/otel/sdk/log` drops an attribute due to a limit being reached. (#5376) +- Identify the `Tracer` returned from the global `TracerProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426) +- Identify the `Meter` returned from the global `MeterProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426) +- Log a warning to the OpenTelemetry internal logger when a `Span` in `go.opentelemetry.io/otel/sdk/trace` drops an attribute, event, or link due to a limit being reached. (#5434) +- Document instrument name requirements in `go.opentelemetry.io/otel/metric`. 
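Several of the 1.28.0 changes listed here revolve around the new `go.opentelemetry.io/otel/semconv/v1.26.0` package. A minimal sketch of building attributes from it, assuming the usual generated helpers (`ServiceName`, `ServiceVersion`, `SchemaURL`) are present as in earlier semconv versions; the service name and version values are examples only:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func main() {
	// Attributes built from the v1.26.0 semantic-conventions package.
	attrs := []attribute.KeyValue{
		semconv.ServiceName("checkout"),
		semconv.ServiceVersion("1.2.3"),
	}
	fmt.Println(semconv.SchemaURL, attrs)
}
```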
(#5435) +- Prevent random number generation data-race for experimental rand exemplars in `go.opentelemetry.io/otel/sdk/metric`. (#5456) +- Fix counting number of dropped attributes of `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5464) +- Fix panic in baggage creation when a member contains `0x80` char in key or value. (#5494) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5508) +- Retry trace and span ID generation if it generated an invalid one in `go.opentelemetry.io/otel/sdk/trace`. (#5514) +- Fix stale timestamps reported by the last-value aggregation. (#5517) +- Indicate the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521) +- Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/attributes` by reducing the number of allocations. (#5549) + +## [1.27.0/0.49.0/0.3.0] 2024-05-21 + +### Added + +- Add example for `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5242) +- Add `RecordFactory` in `go.opentelemetry.io/otel/sdk/log/logtest` to facilitate testing exporter and processor implementations. (#5258) +- Add `RecordFactory` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing bridge implementations. (#5263) +- The count of dropped records from the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is logged. (#5276) +- Add metrics in the `otel-collector` example. (#5283) +- Add the synchronous gauge instrument to `go.opentelemetry.io/otel/metric`. (#5304) + - An `int64` or `float64` synchronous gauge instrument can now be created from a `Meter`. + - All implementations of the API (`go.opentelemetry.io/otel/metric/noop`, `go.opentelemetry.io/otel/sdk/metric`) are updated to support this instrument. +- Add logs to `go.opentelemetry.io/otel/example/dice`. (#5349) + +### Changed + +- The `Shutdown` method of `Exporter` in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` ignores the context cancellation and always returns `nil`. (#5189) +- The `ForceFlush` and `Shutdown` methods of the exporter returned by `New` in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` ignore the context cancellation and always return `nil`. (#5189) +- Apply the value length limits to `Record` attributes in `go.opentelemetry.io/otel/sdk/log`. (#5230) +- De-duplicate map attributes added to a `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5230) +- `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` won't print timestamps when `WithoutTimestamps` option is set. (#5241) +- The `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` exporter won't print `AttributeValueLengthLimit` and `AttributeCountLimit` fields now, instead it prints the `DroppedAttributes` field. (#5272) +- Improved performance in the `Stringer` implementation of `go.opentelemetry.io/otel/baggage.Member` by reducing the number of allocations. (#5286) +- Set the start time for last-value aggregates in `go.opentelemetry.io/otel/sdk/metric`. (#5305) +- The `Span` in `go.opentelemetry.io/otel/sdk/trace` will record links without span context if either non-empty `TraceState` or attributes are provided. (#5315) +- Upgrade all dependencies of `go.opentelemetry.io/otel/semconv/v1.24.0` to `go.opentelemetry.io/otel/semconv/v1.25.0`. 
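The 1.27.0 entry notes the synchronous gauge added to the metric API. The sketch below shows one way it might be used, assuming metric API v1.27.0+ where `Meter.Int64Gauge` exists and the noop provider supports the new instrument; the instrument name `queue.depth` is hypothetical:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/metric/noop"
)

func main() {
	// The noop provider implements the full metric API, so this compiles and
	// runs without configuring an SDK or exporter.
	meter := noop.NewMeterProvider().Meter("example")

	gauge, err := meter.Int64Gauge("queue.depth") // hypothetical instrument name
	if err != nil {
		panic(err)
	}
	gauge.Record(context.Background(), 42)
	fmt.Println("recorded synchronous gauge value")
}
```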
(#5374) + +### Fixed + +- Comparison of unordered maps for `go.opentelemetry.io/otel/log.KeyValue` and `go.opentelemetry.io/otel/log.Value`. (#5306) +- Fix the empty output of `go.opentelemetry.io/otel/log.Value` in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5311) +- Split the behavior of `Recorder` in `go.opentelemetry.io/otel/log/logtest` so it behaves as a `LoggerProvider` only. (#5365) +- Fix wrong package name of the error message when parsing endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5371) +- Identify the `Logger` returned from the global `LoggerProvider` in `go.opentelemetry.io/otel/log/global` with its schema URL. (#5375) + ## [1.26.0/0.48.0/0.2.0-alpha] 2024-04-24 ### Added @@ -33,6 +110,11 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm - Update `go.opentelemetry.io/proto/otlp` from v1.1.0 to v1.2.0. (#5177) - Improve performance of baggage member character validation in `go.opentelemetry.io/otel/baggage`. (#5214) +- The `otel-collector` example now uses docker compose to bring up services instead of kubernetes. (#5244) + +### Fixed + +- Slice attribute values in `go.opentelemetry.io/otel/attribute` are now emitted as their JSON representation. (#5159) ## [1.25.0/0.47.0/0.0.8/0.1.0-alpha] 2024-04-05 @@ -210,7 +292,7 @@ See our [versioning policy](VERSIONING.md) for more information about these stab ## [1.20.0/0.43.0] 2023-11-10 -This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementors need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this. +This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementers need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this. ### Added @@ -242,15 +324,15 @@ This release brings a breaking change for custom trace API implementations. Some - `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the metric.Producer interface. (#4583) - The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type. This extends the `TracerProvider` interface and is is a breaking change for any existing implementation. - Implementors need to update their implementations based on what they want the default behavior of the interface to be. + Implementers need to update their implementations based on what they want the default behavior of the interface to be. See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) - The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type. This extends the `Tracer` interface and is is a breaking change for any existing implementation. - Implementors need to update their implementations based on what they want the default behavior of the interface to be. 
+ Implementers need to update their implementations based on what they want the default behavior of the interface to be. See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) - The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type. This extends the `Span` interface and is is a breaking change for any existing implementation. - Implementors need to update their implementations based on what they want the default behavior of the interface to be. + Implementers need to update their implementations based on what they want the default behavior of the interface to be. See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) - `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` does no longer depend on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660) - `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` does no longer depend on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660) @@ -886,7 +968,7 @@ The next release will require at least [Go 1.19]. - Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340) - `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436) - Re-enabled Attribute Filters in the Metric SDK. (#3396) -- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggragation. (#3408) +- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggregation. (#3408) - Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432) - Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440) - Prevent duplicate Prometheus description, unit, and type. (#3469) @@ -1931,7 +2013,7 @@ with major version 0. - `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369) - Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369) - Unify endpoint API that related to OTel exporter. (#1401) -- Optimize metric histogram aggregator to re-use its slice of buckets. (#1435) +- Optimize metric histogram aggregator to reuse its slice of buckets. (#1435) - Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`. (1430) - Histogram aggregator accepts functional options, uses default boundaries if none given. (#1434) - `SamplingResult` now passed a `Tracestate` from the parent `SpanContext` (#1432) @@ -2921,7 +3003,9 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. 
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.26.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.28.0...HEAD +[1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0 +[1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0 [1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0 [1.25.0/0.47.0/0.0.8/0.1.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.25.0 [1.24.0/0.46.0/0.0.1-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.24.0 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS index 31d336d9222..20255493323 100644 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -12,6 +12,6 @@ # https://help.github.com/en/articles/about-code-owners # -* @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu +* @MrAlias @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu -CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole \ No newline at end of file +CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole @XSAM @dmathieu diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index 7847b459088..b86572f58ea 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -570,6 +570,9 @@ functionality should be added, each one will need their own super-set interfaces and will duplicate the pattern. For this reason, the simple targeted interface that defines the specific functionality should be preferred. +See also: +[Keeping Your Modules Compatible: Working with interfaces](https://go.dev/blog/module-compatibility#working-with-interfaces). + ### Testing The tests should never leak goroutines. @@ -625,17 +628,15 @@ should be canceled. ### Approvers -- [Evan Torrie](https://github.com/evantorrie), Verizon Media -- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics - [Chester Cheung](https://github.com/hanyuancheung), Tencent -- [Damien Mathieu](https://github.com/dmathieu), Elastic -- [Anthony Mirabella](https://github.com/Aneurysm9), AWS ### Maintainers -- [David Ashpole](https://github.com/dashpole), Google - [Aaron Clawson](https://github.com/MadVikingGod), LightStep +- [Damien Mathieu](https://github.com/dmathieu), Elastic +- [David Ashpole](https://github.com/dashpole), Google - [Robert Pająk](https://github.com/pellared), Splunk +- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics - [Tyler Yahn](https://github.com/MrAlias), Splunk ### Emeritus @@ -643,6 +644,8 @@ should be canceled. 
- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb - [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep - [Josh MacDonald](https://github.com/jmacd), LightStep +- [Anthony Mirabella](https://github.com/Aneurysm9), AWS +- [Evan Torrie](https://github.com/evantorrie), Yahoo ### Become an Approver or a Maintainer diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index ca2f0ad037c..f33619f76a2 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -14,8 +14,8 @@ TIMEOUT = 60 .DEFAULT_GOAL := precommit .PHONY: precommit ci -precommit: generate dependabot-generate license-check misspell go-mod-tidy golangci-lint-fix verify-readmes test-default -ci: generate dependabot-check license-check lint vanity-import-check verify-readmes build test-default check-clean-work-tree test-coverage +precommit: generate license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default +ci: generate license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage # Tools @@ -39,9 +39,6 @@ $(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink SEMCONVKIT = $(TOOLS)/semconvkit $(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit -DBOTCONF = $(TOOLS)/dbotconf -$(TOOLS)/dbotconf: PACKAGE=go.opentelemetry.io/build-tools/dbotconf - GOLANGCI_LINT = $(TOOLS)/golangci-lint $(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint @@ -70,7 +67,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck $(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck .PHONY: tools -tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) +tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) # Virtualized python tools via docker @@ -252,15 +249,6 @@ license-check: exit 1; \ fi -DEPENDABOT_CONFIG = .github/dependabot.yml -.PHONY: dependabot-check -dependabot-check: $(DBOTCONF) - @$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || ( echo "(run: make dependabot-generate)"; exit 1 ) - -.PHONY: dependabot-generate -dependabot-generate: $(DBOTCONF) - @$(DBOTCONF) generate > $(DEPENDABOT_CONFIG) - .PHONY: check-clean-work-tree check-clean-work-tree: @if ! git diff --quiet; then \ @@ -276,10 +264,7 @@ SEMCONVPKG ?= "semconv/" semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT) [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." 
--only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)" $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" @@ -292,16 +277,20 @@ gorelease/%:| $(GORELEASE) && $(GORELEASE) \ || echo "" +.PHONY: verify-mods +verify-mods: $(MULTIMOD) + $(MULTIMOD) verify + .PHONY: prerelease -prerelease: $(MULTIMOD) +prerelease: verify-mods @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) - $(MULTIMOD) verify && $(MULTIMOD) prerelease -m ${MODSET} + $(MULTIMOD) prerelease -m ${MODSET} COMMIT ?= "HEAD" .PHONY: add-tags -add-tags: $(MULTIMOD) +add-tags: verify-mods @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) - $(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} + $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} .PHONY: lint-markdown lint-markdown: diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 47f9a41f66d..5a890931731 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -15,7 +15,7 @@ It provides a set of APIs to directly measure performance and behavior of your s |---------|--------------------| | Traces | Stable | | Metrics | Stable | -| Logs | In development[^1] | +| Logs | Beta[^1] | Progress and status specific to this repository is tracked in our [project boards](https://github.com/open-telemetry/opentelemetry-go/projects) @@ -97,12 +97,12 @@ export pipeline to send that telemetry to an observability platform. All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters). -| Exporter | Metrics | Traces | -|---------------------------------------|:-------:|:------:| -| [OTLP](./exporters/otlp/) | ✓ | ✓ | -| [Prometheus](./exporters/prometheus/) | ✓ | | -| [stdout](./exporters/stdout/) | ✓ | ✓ | -| [Zipkin](./exporters/zipkin/) | | ✓ | +| Exporter | Logs | Metrics | Traces | +|---------------------------------------|:----:|:-------:|:------:| +| [OTLP](./exporters/otlp/) | ✓ | ✓ | ✓ | +| [Prometheus](./exporters/prometheus/) | | ✓ | | +| [stdout](./exporters/stdout/) | ✓ | ✓ | ✓ | +| [Zipkin](./exporters/zipkin/) | | | ✓ | ## Contributing diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index d2691d0bd8b..940f57f3d87 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -27,6 +27,12 @@ You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org You can check/report problems with `gorelease` [here](https://golang.org/issues/26420). +## Verify changes for contrib repository + +If the changes in the main repository are going to affect the contrib repository, it is important to verify that the changes are compatible with the contrib repository. + +Follow [the steps](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md#verify-otel-changes) in the contrib repository to verify OTel changes. 
+ ## Pre-Release First, decide which module sets will be released and update their versions diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go index b320314133a..9ea0ecbbd27 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/value.go +++ b/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -231,15 +231,27 @@ func (v Value) Emit() string { case BOOL: return strconv.FormatBool(v.AsBool()) case INT64SLICE: - return fmt.Sprint(v.asInt64Slice()) + j, err := json.Marshal(v.asInt64Slice()) + if err != nil { + return fmt.Sprintf("invalid: %v", v.asInt64Slice()) + } + return string(j) case INT64: return strconv.FormatInt(v.AsInt64(), 10) case FLOAT64SLICE: - return fmt.Sprint(v.asFloat64Slice()) + j, err := json.Marshal(v.asFloat64Slice()) + if err != nil { + return fmt.Sprintf("invalid: %v", v.asFloat64Slice()) + } + return string(j) case FLOAT64: return fmt.Sprint(v.AsFloat64()) case STRINGSLICE: - return fmt.Sprint(v.asStringSlice()) + j, err := json.Marshal(v.asStringSlice()) + if err != nil { + return fmt.Sprintf("invalid: %v", v.asStringSlice()) + } + return string(j) case STRING: return v.stringly default: diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index 75773bc1ce9..c40c896cc66 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -302,7 +302,7 @@ func parseMember(member string) (Member, error) { // Decode a percent-encoded value. value, err := url.PathUnescape(val) if err != nil { - return newInvalidMember(), fmt.Errorf("%w: %v", errInvalidValue, err) + return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err) } return Member{key: key, value: value, properties: props, hasData: true}, nil } @@ -335,9 +335,9 @@ func (m Member) String() string { // A key is just an ASCII string. A value is restricted to be // US-ASCII characters excluding CTLs, whitespace, // DQUOTE, comma, semicolon, and backslash. 
- s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, valueEscape(m.value)) + s := m.key + keyValueDelimiter + valueEscape(m.value) if len(m.properties) > 0 { - s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String()) + s += propertyDelimiter + m.properties.String() } return s } @@ -735,7 +735,7 @@ func validateKey(s string) bool { } func validateKeyChar(c int32) bool { - return c >= 0 && c <= int32(utf8.RuneSelf) && safeKeyCharset[c] + return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c] } func validateValue(s string) bool { @@ -850,7 +850,7 @@ var safeValueCharset = [utf8.RuneSelf]bool{ } func validateValueChar(c int32) bool { - return c >= 0 && c <= int32(utf8.RuneSelf) && safeValueCharset[c] + return c >= 0 && c < int32(utf8.RuneSelf) && safeValueCharset[c] } // valueEscape escapes the string so it can be safely placed inside a baggage value, diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/README.md new file mode 100644 index 00000000000..9184068d89c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/README.md @@ -0,0 +1,3 @@ +# OTLP Metric gRPC Exporter + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go index 16f9af12b66..428cfea2334 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" @@ -47,7 +36,7 @@ type client struct { } // newClient creates a new gRPC metric client. -func newClient(ctx context.Context, cfg oconf.Config) (*client, error) { +func newClient(_ context.Context, cfg oconf.Config) (*client, error) { c := &client{ exportTimeout: cfg.Metrics.Timeout, requestFunc: cfg.RetryConfig.RequestFunc(retryable), @@ -65,7 +54,7 @@ func newClient(ctx context.Context, cfg oconf.Config) (*client, error) { dialOpts := []grpc.DialOption{grpc.WithUserAgent(userAgent)} dialOpts = append(dialOpts, cfg.DialOptions...) - conn, err := grpc.DialContext(ctx, cfg.Metrics.Endpoint, dialOpts...) + conn, err := grpc.NewClient(cfg.Metrics.Endpoint, dialOpts...) 
if err != nil { return nil, err } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go index 9269f5a01b7..38d7d60d403 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" @@ -196,6 +185,8 @@ func WithServiceConfig(serviceConfig string) Option { // gRPC connection. The options here are appended to the internal grpc.DialOptions // used so they will take precedence over any other internal grpc.DialOptions // they might conflict with. +// The [grpc.WithBlock], [grpc.WithTimeout], and [grpc.WithReturnConnectionError] +// grpc.DialOptions are ignored. // // This option has no effect if WithGRPCConn is used. func WithDialOption(opts ...grpc.DialOption) Option { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go index 48e63689e00..3d74ef1a01d 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package otlpmetricgrpc provides an OTLP metrics exporter using gRPC. @@ -40,7 +29,7 @@ The configuration can be overridden by [WithInsecure], [WithGRPCConn] options. OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_METRICS_HEADERS (default: none) - key-value pairs used as gRPC metadata associated with gRPC requests. -The value is expected to be represented in a format matching to the [W3C Baggage HTTP Header Content Format], +The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format], except that additional semi-colon delimited metadata is not supported. Example value: "key1=value1,key2=value2". OTEL_EXPORTER_OTLP_METRICS_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS. 
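The doc.go hunk above documents the `OTEL_EXPORTER_OTLP_HEADERS` / `OTEL_EXPORTER_OTLP_METRICS_HEADERS` format ("key1=value1,key2=value2"). As a minimal usage sketch (not part of this patch; the endpoint and header values are placeholders, and it assumes the vendored otlpmetricgrpc and SDK metric modules), the same headers can be supplied programmatically with the exporter's `WithHeaders` option:

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	ctx := context.Background()

	// Same key-value pairs as OTEL_EXPORTER_OTLP_HEADERS="key1=value1,key2=value2",
	// passed in code instead of the environment. Endpoint is a placeholder.
	exp, err := otlpmetricgrpc.New(ctx,
		otlpmetricgrpc.WithEndpoint("collector.example.com:4317"),
		otlpmetricgrpc.WithHeaders(map[string]string{"key1": "value1", "key2": "value2"}),
	)
	if err != nil {
		log.Fatal(err)
	}

	// Drive the exporter with a periodic reader from the metric SDK.
	mp := sdkmetric.NewMeterProvider(sdkmetric.WithReader(sdkmetric.NewPeriodicReader(exp)))
	defer func() { _ = mp.Shutdown(ctx) }()
}
```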
@@ -63,12 +52,12 @@ OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_ The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options. OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE (default: none) - -the filepath to the client certificate/chain trust for clients private key to use in mTLS communication in PEM format. +the filepath to the client certificate/chain trust for client's private key to use in mTLS communication in PEM format. OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE. The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options. OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY (default: none) - -the filepath to the clients private key to use in mTLS communication in PEM format. +the filepath to the client's private key to use in mTLS communication in PEM format. OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY. The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] option. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go index 826276ba392..98afc0b1e9d 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" @@ -90,7 +79,7 @@ func (e *Exporter) Export(ctx context.Context, rm *metricdata.ResourceMetrics) e return fmt.Errorf("failed to upload metrics: %w", upErr) } // Merge the two errors. - return fmt.Errorf("failed to upload incomplete metrics (%s): %w", err, upErr) + return fmt.Errorf("failed to upload incomplete metrics (%w): %w", err, upErr) } return err } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go index 17951ceb451..b2735ba923c 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go @@ -2,18 +2,7 @@ // source: internal/shared/otlp/envconfig/envconfig.go.tmpl // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go index 06718efa898..95e2f4ba3b0 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go index ae100513bad..7ae53f2d181 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go @@ -2,18 +2,7 @@ // source: internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go index e2d99dc22c1..b6ed9a2bb65 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go @@ -2,24 +2,14 @@ // source: internal/shared/otlp/otlpmetric/oconf/options.go.tmpl // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf" import ( "crypto/tls" "fmt" + "net/http" "net/url" "path" "strings" @@ -53,6 +43,10 @@ const ( ) type ( + // HTTPTransportProxyFunc is a function that resolves which URL to use as proxy for a given request. + // This type is compatible with `http.Transport.Proxy` and can be used to set a custom proxy function to the OTLP HTTP client. + HTTPTransportProxyFunc func(*http.Request) (*url.URL, error) + SignalConfig struct { Endpoint string Insecure bool @@ -67,6 +61,8 @@ type ( TemporalitySelector metric.TemporalitySelector AggregationSelector metric.AggregationSelector + + Proxy HTTPTransportProxyFunc } Config struct { @@ -371,3 +367,10 @@ func WithAggregationSelector(selector metric.AggregationSelector) GenericOption return cfg }) } + +func WithProxy(pf HTTPTransportProxyFunc) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Metrics.Proxy = pf + return cfg + }) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/optiontypes.go index 8a3c8422e1b..83f6d7fd1a7 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/optiontypes.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/optiontypes.go @@ -2,18 +2,7 @@ // source: internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go index 2e36e0b6f25..0229ac80bef 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go @@ -2,18 +2,7 @@ // source: internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go index f4d48198251..50e25fdbc72 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go @@ -2,18 +2,7 @@ // source: internal/shared/otlp/partialsuccess.go // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go index 689779c3604..cc3a77055eb 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go @@ -2,18 +2,7 @@ // source: internal/shared/otlp/retry/retry.go.tmpl // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package retry provides request retry functionality that can perform // configurable exponential backoff for transient errors and honor any @@ -123,7 +112,7 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { } if ctxErr := waitFunc(ctx, delay); ctxErr != nil { - return fmt.Errorf("%w: %s", ctxErr, err) + return fmt.Errorf("%w: %w", ctxErr, err) } } } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go index e80798eebde..2605c74d054 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go @@ -2,18 +2,7 @@ // source: internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/error.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/error.go index d5d2fdcb7ea..d31652b4d68 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/error.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/error.go @@ -2,18 +2,7 @@ // source: internal/shared/otlp/otlpmetric/transform/error.go.tmpl // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform" @@ -69,7 +58,7 @@ func (e *multiErr) append(err error) { // Do not use errors.As here, this should only be flattened one layer. If // there is a *multiErr several steps down the chain, all the errors above // it will be discarded if errors.As is used instead. - switch other := err.(type) { + switch other := err.(type) { //nolint:errorlint case *multiErr: // Flatten err errors into e. e.errs = append(e.errs, other.errs...) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go index 80f03b4b420..975e3b7aa1a 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go @@ -2,18 +2,7 @@ // source: internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package transform provides transformation functionality from the // sdk/metric/metricdata data-types into OTLP data-types. @@ -90,7 +79,7 @@ func metric(m metricdata.Metrics) (*mpb.Metric, error) { out := &mpb.Metric{ Name: m.Name, Description: m.Description, - Unit: string(m.Unit), + Unit: m.Unit, } switch a := m.Data.(type) { case metricdata.Gauge[int64]: @@ -109,6 +98,8 @@ func metric(m metricdata.Metrics) (*mpb.Metric, error) { out.Data, err = ExponentialHistogram(a) case metricdata.ExponentialHistogram[float64]: out.Data, err = ExponentialHistogram(a) + case metricdata.Summary: + out.Data = Summary(a) default: return out, fmt.Errorf("%w: %T", errUnknownAggregation, a) } @@ -318,3 +309,44 @@ func Exemplars[N int64 | float64](exemplars []metricdata.Exemplar[N]) []*mpb.Exe } return out } + +// Summary returns an OTLP Metric_Summary generated from s. +func Summary(s metricdata.Summary) *mpb.Metric_Summary { + return &mpb.Metric_Summary{ + Summary: &mpb.Summary{ + DataPoints: SummaryDataPoints(s.DataPoints), + }, + } +} + +// SummaryDataPoints returns a slice of OTLP SummaryDataPoint generated from +// dPts. +func SummaryDataPoints(dPts []metricdata.SummaryDataPoint) []*mpb.SummaryDataPoint { + out := make([]*mpb.SummaryDataPoint, 0, len(dPts)) + for _, dPt := range dPts { + sdp := &mpb.SummaryDataPoint{ + Attributes: AttrIter(dPt.Attributes.Iter()), + StartTimeUnixNano: timeUnixNano(dPt.StartTime), + TimeUnixNano: timeUnixNano(dPt.Time), + Count: dPt.Count, + Sum: dPt.Sum, + QuantileValues: QuantileValues(dPt.QuantileValues), + } + out = append(out, sdp) + } + return out +} + +// QuantileValues returns a slice of OTLP SummaryDataPoint_ValueAtQuantile +// generated from quantiles. 
+func QuantileValues(quantiles []metricdata.QuantileValue) []*mpb.SummaryDataPoint_ValueAtQuantile { + out := make([]*mpb.SummaryDataPoint_ValueAtQuantile, 0, len(quantiles)) + for _, q := range quantiles { + quantile := &mpb.SummaryDataPoint_ValueAtQuantile{ + Quantile: q.Quantile, + Value: q.Value, + } + out = append(out, quantile) + } + return out +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go index a4ce4da3e3e..a731860f5c0 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go @@ -1,20 +1,9 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" // Version is the current release version of the OpenTelemetry OTLP over gRPC metrics exporter in use. func Version() string { - return "1.24.0" + return "1.28.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go index eeb39339d45..205594b7f34 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go @@ -294,7 +294,10 @@ func evaluate(err error) (bool, time.Duration) { return false, 0 } - rErr, ok := err.(retryableError) + // Do not use errors.As here, this should only be flattened one layer. If + // there are several chained errors, all the errors above it will be + // discarded if errors.As is used instead. + rErr, ok := err.(retryableError) //nolint:errorlint if !ok { return false, 0 } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go index 442d8096103..701deb6d390 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go @@ -79,7 +79,7 @@ func (e *Exporter) Export(ctx context.Context, rm *metricdata.ResourceMetrics) e return fmt.Errorf("failed to upload metrics: %w", upErr) } // Merge the two errors. 
- return fmt.Errorf("failed to upload incomplete metrics (%s): %w", err, upErr) + return fmt.Errorf("failed to upload incomplete metrics (%w): %w", err, upErr) } return err } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go index ea4cff080fc..a9a08ffe64e 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go @@ -112,7 +112,7 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { } if ctxErr := waitFunc(ctx, delay); ctxErr != nil { - return fmt.Errorf("%w: %s", ctxErr, err) + return fmt.Errorf("%w: %w", ctxErr, err) } } } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/error.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/error.go index 60d0d1f72ae..bb6d21f0b67 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/error.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/error.go @@ -58,7 +58,7 @@ func (e *multiErr) append(err error) { // Do not use errors.As here, this should only be flattened one layer. If // there is a *multiErr several steps down the chain, all the errors above // it will be discarded if errors.As is used instead. - switch other := err.(type) { + switch other := err.(type) { //nolint:errorlint case *multiErr: // Flatten err errors into e. e.errs = append(e.errs, other.errs...) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go index 04c2ce75704..0a1a65c44d2 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go @@ -79,7 +79,7 @@ func metric(m metricdata.Metrics) (*mpb.Metric, error) { out := &mpb.Metric{ Name: m.Name, Description: m.Description, - Unit: string(m.Unit), + Unit: m.Unit, } switch a := m.Data.(type) { case metricdata.Gauge[int64]: diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go index c69f4111d26..88e84ca892d 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go @@ -5,5 +5,5 @@ package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpme // Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf metrics exporter in use. 
func Version() string { - return "1.26.0" + return "1.28.0" } diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go index f32766e57f6..822d8479474 100644 --- a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go +++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go @@ -14,33 +14,33 @@ import ( // BoolSliceValue converts a bool slice into an array with same elements as slice. func BoolSliceValue(v []bool) interface{} { var zero bool - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]bool), v) - return cp.Elem().Interface() + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } // Int64SliceValue converts an int64 slice into an array with same elements as slice. func Int64SliceValue(v []int64) interface{} { var zero int64 - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v) - return cp.Elem().Interface() + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } // Float64SliceValue converts a float64 slice into an array with same elements as slice. func Float64SliceValue(v []float64) interface{} { var zero float64 - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]float64), v) - return cp.Elem().Interface() + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } // StringSliceValue converts a string slice into an array with same elements as slice. func StringSliceValue(v []string) interface{} { var zero string - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v) - return cp.Elem().Interface() + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } // AsBoolSlice converts a bool array into a slice into with same elements as array. diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go index 0c8ed20a596..3a0cc42f6a4 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go @@ -281,6 +281,32 @@ func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.Reco } } +type sfGauge struct { + embedded.Float64Gauge + + name string + opts []metric.Float64GaugeOption + + delegate atomic.Value // metric.Float64Gauge +} + +var _ metric.Float64Gauge = (*sfGauge)(nil) + +func (i *sfGauge) setDelegate(m metric.Meter) { + ctr, err := m.Float64Gauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfGauge) Record(ctx context.Context, x float64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64Gauge).Record(ctx, x, opts...) + } +} + type siCounter struct { embedded.Int64Counter @@ -358,3 +384,29 @@ func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.Record ctr.(metric.Int64Histogram).Record(ctx, x, opts...) 
} } + +type siGauge struct { + embedded.Int64Gauge + + name string + opts []metric.Int64GaugeOption + + delegate atomic.Value // metric.Int64Gauge +} + +var _ metric.Int64Gauge = (*siGauge)(nil) + +func (i *siGauge) setDelegate(m metric.Meter) { + ctr, err := m.Int64Gauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siGauge) Record(ctx context.Context, x int64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Int64Gauge).Record(ctx, x, opts...) + } +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go index f21898591e5..cfd1df9bfa2 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -65,6 +65,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me key := il{ name: name, version: c.InstrumentationVersion(), + schema: c.SchemaURL(), } if p.meters == nil { @@ -164,6 +165,17 @@ func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOpti return i, nil } +func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64Gauge(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siGauge{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { if del, ok := m.delegate.Load().(metric.Meter); ok { return del.Int64ObservableCounter(name, options...) @@ -230,6 +242,17 @@ func (m *meter) Float64Histogram(name string, options ...metric.Float64Histogram return i, nil } +func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64Gauge(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &sfGauge{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { if del, ok := m.delegate.Load().(metric.Meter); ok { return del.Float64ObservableCounter(name, options...) diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go index 596f716f40c..e31f442b48f 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -86,6 +86,7 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T key := il{ name: name, version: c.InstrumentationVersion(), + schema: c.SchemaURL(), } if p.tracers == nil { @@ -101,10 +102,7 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T return t } -type il struct { - name string - version string -} +type il struct{ name, version, schema string } // tracer is a placeholder for a trace.Tracer. 
// diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go index c7234f4bc8d..cf23db77803 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go @@ -39,7 +39,7 @@ type Float64ObservableCounter interface { } // Float64ObservableCounterConfig contains options for asynchronous counter -// instruments that record int64 values. +// instruments that record float64 values. type Float64ObservableCounterConfig struct { description string unit string @@ -97,7 +97,7 @@ type Float64ObservableUpDownCounter interface { } // Float64ObservableUpDownCounterConfig contains options for asynchronous -// counter instruments that record int64 values. +// counter instruments that record float64 values. type Float64ObservableUpDownCounterConfig struct { description string unit string @@ -154,7 +154,7 @@ type Float64ObservableGauge interface { } // Float64ObservableGaugeConfig contains options for asynchronous counter -// instruments that record int64 values. +// instruments that record float64 values. type Float64ObservableGaugeConfig struct { description string unit string diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go index 075234b3380..f153745b005 100644 --- a/vendor/go.opentelemetry.io/otel/metric/doc.go +++ b/vendor/go.opentelemetry.io/otel/metric/doc.go @@ -57,6 +57,23 @@ asynchronous measurement, a Gauge ([Int64ObservableGauge] and See the [OpenTelemetry documentation] for more information about instruments and their intended use. +# Instrument Name + +OpenTelemetry defines an [instrument name syntax] that restricts what +instrument names are allowed. + +Instrument names should ... + + - Not be empty. + - Have an alphabetic character as their first letter. + - Have any letter after the first be an alphanumeric character, ‘_’, ‘.’, + ‘-’, or ‘/’. + - Have a maximum length of 255 letters. + +To ensure compatibility with observability platforms, all instruments created +need to conform to this syntax. Not all implementations of the API will validate +these names, it is the callers responsibility to ensure compliance. + # Measurements Measurements are made by recording values and information about the values with @@ -153,6 +170,7 @@ It is strongly recommended that authors only embed That implementation is the only one OpenTelemetry authors can guarantee will fully implement all the API interfaces when a user updates their API. +[instrument name syntax]: https://opentelemetry.io/docs/specs/otel/metrics/api/#instrument-name-syntax [OpenTelemetry documentation]: https://opentelemetry.io/docs/concepts/signals/metrics/ [GetMeterProvider]: https://pkg.go.dev/go.opentelemetry.io/otel#GetMeterProvider */ diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go index 15bebae084a..1a9dc68093f 100644 --- a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go +++ b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go @@ -102,6 +102,16 @@ type Float64Counter interface{ float64Counter() } // the API package). type Float64Histogram interface{ float64Histogram() } +// Float64Gauge is embedded in [go.opentelemetry.io/otel/metric.Float64Gauge]. 
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Gauge] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Float64Gauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64Gauge interface{ float64Gauge() } + // Float64ObservableCounter is embedded in // [go.opentelemetry.io/otel/metric.Float64ObservableCounter]. // @@ -174,6 +184,16 @@ type Int64Counter interface{ int64Counter() } // the API package). type Int64Histogram interface{ int64Histogram() } +// Int64Gauge is embedded in [go.opentelemetry.io/otel/metric.Int64Gauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Gauge] if you want users to experience +// a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Int64Gauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Int64Gauge interface{ int64Gauge() } + // Int64ObservableCounter is embedded in // [go.opentelemetry.io/otel/metric.Int64ObservableCounter]. // diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go index 451413192a9..ea52e402331 100644 --- a/vendor/go.opentelemetry.io/otel/metric/instrument.go +++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -16,6 +16,7 @@ type InstrumentOption interface { Int64CounterOption Int64UpDownCounterOption Int64HistogramOption + Int64GaugeOption Int64ObservableCounterOption Int64ObservableUpDownCounterOption Int64ObservableGaugeOption @@ -23,6 +24,7 @@ type InstrumentOption interface { Float64CounterOption Float64UpDownCounterOption Float64HistogramOption + Float64GaugeOption Float64ObservableCounterOption Float64ObservableUpDownCounterOption Float64ObservableGaugeOption @@ -51,6 +53,11 @@ func (o descOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64Histogra return c } +func (o descOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig { + c.description = string(o) + return c +} + func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { c.description = string(o) return c @@ -81,6 +88,11 @@ func (o descOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfi return c } +func (o descOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig { + c.description = string(o) + return c +} + func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { c.description = string(o) return c @@ -116,6 +128,11 @@ func (o unitOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64Histogra return c } +func (o unitOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig { + c.unit = string(o) + return c +} + func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { c.unit = string(o) return c @@ -146,6 +163,11 @@ func (o unitOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfi return c } +func (o unitOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig { + c.unit = string(o) + return c +} + func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { c.unit = string(o) return c 
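The hunks above thread the new synchronous `Int64Gauge`/`Float64Gauge` instruments through the embedded types, the option plumbing (`descOpt`/`unitOpt`), and the global delegating meter. A minimal usage sketch under those vendored versions; the meter name, instrument name, and recorded value are illustrative placeholders, not taken from this patch:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func recordTemperature(ctx context.Context) error {
	// Meter and instrument names are placeholders that follow the
	// instrument name syntax documented in metric/doc.go above.
	meter := otel.Meter("example.io/sensors")

	gauge, err := meter.Float64Gauge(
		"room.temperature",
		metric.WithUnit("Cel"),
		metric.WithDescription("Last observed room temperature."),
	)
	if err != nil {
		return err
	}

	// Unlike the asynchronous ObservableGauge, the value is recorded
	// synchronously at the call site.
	gauge.Record(ctx, 21.5)
	return nil
}
```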
diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go index 7aa82e0c17b..6a7991e0151 100644 --- a/vendor/go.opentelemetry.io/otel/metric/meter.go +++ b/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -47,17 +47,37 @@ type Meter interface { // Int64Counter returns a new Int64Counter instrument identified by name // and configured with options. The instrument is used to synchronously // record increasing int64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error) // Int64UpDownCounter returns a new Int64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record int64 measurements during a computational // operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error) // Int64Histogram returns a new Int64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of int64 measurements during a // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error) + // Int64Gauge returns a new Int64Gauge instrument identified by name and + // configured with options. The instrument is used to synchronously record + // instantaneous int64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error) // Int64ObservableCounter returns a new Int64ObservableCounter identified // by name and configured with options. The instrument is used to // asynchronously record increasing int64 measurements once per a @@ -67,6 +87,10 @@ type Meter interface { // the WithInt64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error) // Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter // instrument identified by name and configured with options. The @@ -77,6 +101,10 @@ type Meter interface { // the WithInt64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. 
+ // See the Instrument Name section of the package documentation for more + // information. Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error) // Int64ObservableGauge returns a new Int64ObservableGauge instrument // identified by name and configured with options. The instrument is used @@ -87,23 +115,47 @@ type Meter interface { // the WithInt64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error) // Float64Counter returns a new Float64Counter instrument identified by // name and configured with options. The instrument is used to // synchronously record increasing float64 measurements during a // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error) // Float64UpDownCounter returns a new Float64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record float64 measurements during a computational // operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error) // Float64Histogram returns a new Float64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of float64 measurements during a // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error) + // Float64Gauge returns a new Float64Gauge instrument identified by name and + // configured with options. The instrument is used to synchronously record + // instantaneous float64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error) // Float64ObservableCounter returns a new Float64ObservableCounter // instrument identified by name and configured with options. The // instrument is used to asynchronously record increasing float64 @@ -113,6 +165,10 @@ type Meter interface { // the WithFloat64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. 
Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error) // Float64ObservableUpDownCounter returns a new // Float64ObservableUpDownCounter instrument identified by name and @@ -123,6 +179,10 @@ type Meter interface { // the WithFloat64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error) // Float64ObservableGauge returns a new Float64ObservableGauge instrument // identified by name and configured with options. The instrument is used @@ -133,6 +193,10 @@ type Meter interface { // the WithFloat64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error) // RegisterCallback registers f to be called during the collection of a diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go index 4524a57d251..ca6fcbdc099 100644 --- a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go +++ b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go @@ -32,6 +32,8 @@ var ( _ metric.Float64UpDownCounter = Float64UpDownCounter{} _ metric.Int64Histogram = Int64Histogram{} _ metric.Float64Histogram = Float64Histogram{} + _ metric.Int64Gauge = Int64Gauge{} + _ metric.Float64Gauge = Float64Gauge{} _ metric.Int64ObservableCounter = Int64ObservableCounter{} _ metric.Float64ObservableCounter = Float64ObservableCounter{} _ metric.Int64ObservableGauge = Int64ObservableGauge{} @@ -76,6 +78,12 @@ func (Meter) Int64Histogram(string, ...metric.Int64HistogramOption) (metric.Int6 return Int64Histogram{}, nil } +// Int64Gauge returns a Gauge used to record int64 measurements that +// produces no telemetry. +func (Meter) Int64Gauge(string, ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { + return Int64Gauge{}, nil +} + // Int64ObservableCounter returns an ObservableCounter used to record int64 // measurements that produces no telemetry. func (Meter) Int64ObservableCounter(string, ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { @@ -112,6 +120,12 @@ func (Meter) Float64Histogram(string, ...metric.Float64HistogramOption) (metric. return Float64Histogram{}, nil } +// Float64Gauge returns a Gauge used to record float64 measurements that +// produces no telemetry. +func (Meter) Float64Gauge(string, ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { + return Float64Gauge{}, nil +} + // Float64ObservableCounter returns an ObservableCounter used to record int64 // measurements that produces no telemetry. 
func (Meter) Float64ObservableCounter(string, ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { @@ -197,6 +211,20 @@ type Float64Histogram struct{ embedded.Float64Histogram } // Record performs no operation. func (Float64Histogram) Record(context.Context, float64, ...metric.RecordOption) {} +// Int64Gauge is an OpenTelemetry Gauge used to record instantaneous int64 +// measurements. It produces no telemetry. +type Int64Gauge struct{ embedded.Int64Gauge } + +// Record performs no operation. +func (Int64Gauge) Record(context.Context, int64, ...metric.RecordOption) {} + +// Float64Gauge is an OpenTelemetry Gauge used to record instantaneous float64 +// measurements. It produces no telemetry. +type Float64Gauge struct{ embedded.Float64Gauge } + +// Record performs no operation. +func (Float64Gauge) Record(context.Context, float64, ...metric.RecordOption) {} + // Int64ObservableCounter is an OpenTelemetry ObservableCounter used to record // int64 measurements. It produces no telemetry. type Int64ObservableCounter struct { diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go index 5420d546eb7..8403a4bad2d 100644 --- a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go @@ -28,7 +28,7 @@ type Float64Counter interface { } // Float64CounterConfig contains options for synchronous counter instruments that -// record int64 values. +// record float64 values. type Float64CounterConfig struct { description string unit string @@ -81,7 +81,7 @@ type Float64UpDownCounter interface { } // Float64UpDownCounterConfig contains options for synchronous counter -// instruments that record int64 values. +// instruments that record float64 values. type Float64UpDownCounterConfig struct { description string unit string @@ -133,8 +133,8 @@ type Float64Histogram interface { Record(ctx context.Context, incr float64, options ...RecordOption) } -// Float64HistogramConfig contains options for synchronous counter instruments -// that record int64 values. +// Float64HistogramConfig contains options for synchronous histogram +// instruments that record float64 values. type Float64HistogramConfig struct { description string unit string @@ -172,3 +172,55 @@ func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 { type Float64HistogramOption interface { applyFloat64Histogram(Float64HistogramConfig) Float64HistogramConfig } + +// Float64Gauge is an instrument that records instantaneous float64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64Gauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64Gauge + + // Record records the instantaneous value. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Record(ctx context.Context, value float64, options ...RecordOption) +} + +// Float64GaugeConfig contains options for synchronous gauge instruments that +// record float64 values. 
+type Float64GaugeConfig struct { + description string + unit string +} + +// NewFloat64GaugeConfig returns a new [Float64GaugeConfig] with all opts +// applied. +func NewFloat64GaugeConfig(opts ...Float64GaugeOption) Float64GaugeConfig { + var config Float64GaugeConfig + for _, o := range opts { + config = o.applyFloat64Gauge(config) + } + return config +} + +// Description returns the configured description. +func (c Float64GaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64GaugeConfig) Unit() string { + return c.unit +} + +// Float64GaugeOption applies options to a [Float64GaugeConfig]. See +// [InstrumentOption] for other options that can be used as a +// Float64GaugeOption. +type Float64GaugeOption interface { + applyFloat64Gauge(Float64GaugeConfig) Float64GaugeConfig +} diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go index 0dcbf06db99..783fdfba773 100644 --- a/vendor/go.opentelemetry.io/otel/metric/syncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/syncint64.go @@ -133,7 +133,7 @@ type Int64Histogram interface { Record(ctx context.Context, incr int64, options ...RecordOption) } -// Int64HistogramConfig contains options for synchronous counter instruments +// Int64HistogramConfig contains options for synchronous histogram instruments // that record int64 values. type Int64HistogramConfig struct { description string @@ -172,3 +172,55 @@ func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 { type Int64HistogramOption interface { applyInt64Histogram(Int64HistogramConfig) Int64HistogramConfig } + +// Int64Gauge is an instrument that records instantaneous int64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64Gauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64Gauge + + // Record records the instantaneous value. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Record(ctx context.Context, value int64, options ...RecordOption) +} + +// Int64GaugeConfig contains options for synchronous gauge instruments that +// record int64 values. +type Int64GaugeConfig struct { + description string + unit string +} + +// NewInt64GaugeConfig returns a new [Int64GaugeConfig] with all opts +// applied. +func NewInt64GaugeConfig(opts ...Int64GaugeOption) Int64GaugeConfig { + var config Int64GaugeConfig + for _, o := range opts { + config = o.applyInt64Gauge(config) + } + return config +} + +// Description returns the configured description. +func (c Int64GaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64GaugeConfig) Unit() string { + return c.unit +} + +// Int64GaugeOption applies options to a [Int64GaugeConfig]. See +// [InstrumentOption] for other options that can be used as a +// Int64GaugeOption. 
+type Int64GaugeOption interface { + applyInt64Gauge(Int64GaugeConfig) Int64GaugeConfig +} diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json new file mode 100644 index 00000000000..8c5ac55ca93 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/renovate.json @@ -0,0 +1,24 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "config:recommended" + ], + "ignorePaths": [], + "labels": ["Skip Changelog", "dependencies"], + "postUpdateOptions" : [ + "gomodTidy" + ], + "packageRules": [ + { + "matchManagers": ["gomod"], + "matchDepTypes": ["indirect"], + "enabled": true + }, + { + "matchFileNames": ["internal/tools/**"], + "matchManagers": ["gomod"], + "matchDepTypes": ["indirect"], + "enabled": false + } + ] +} diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt index e0a43e13840..ab09daf9d53 100644 --- a/vendor/go.opentelemetry.io/otel/requirements.txt +++ b/vendor/go.opentelemetry.io/otel/requirements.txt @@ -1 +1 @@ -codespell==2.2.6 +codespell==2.3.0 diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go index 7eaa0769602..07923ed8d94 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go @@ -22,7 +22,7 @@ const ( BatchSpanProcessorMaxQueueSizeKey = "OTEL_BSP_MAX_QUEUE_SIZE" // BatchSpanProcessorMaxExportBatchSizeKey is the maximum batch size (i.e. // 512). Note: it must be less than or equal to - // EnvBatchSpanProcessorMaxQueueSize. + // BatchSpanProcessorMaxQueueSize. BatchSpanProcessorMaxExportBatchSizeKey = "OTEL_BSP_MAX_EXPORT_BATCH_SIZE" // AttributeValueLengthKey is the maximum allowed attribute value size. 
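At this point the full synchronous Gauge surface is in place: the embedded markers, the `applyFloat64Gauge`/`applyInt64Gauge` option plumbing, the new `Meter` methods, the no-op implementations, and the `Float64GaugeConfig`/`Int64GaugeConfig` types. A minimal usage sketch follows; the meter name, instrument name, unit, and attributes are my own illustrative choices, and with no SDK `MeterProvider` configured the global meter is a no-op, so this only demonstrates the call shape.

```go
// Usage sketch for the new synchronous Gauge API (not part of this patch).
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	meter := otel.Meter("example/gauge")

	// WithDescription and WithUnit flow through the applyFloat64Gauge option
	// plumbing added to instrument.go above.
	temp, err := meter.Float64Gauge(
		"room.temperature",
		metric.WithDescription("Instantaneous room temperature"),
		metric.WithUnit("Cel"),
	)
	if err != nil {
		panic(err)
	}

	// Record captures an instantaneous value, optionally with attributes.
	temp.Record(context.Background(), 21.5,
		metric.WithAttributes(attribute.String("room", "kitchen")))
}
```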
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/gen.go b/vendor/go.opentelemetry.io/otel/sdk/internal/gen.go deleted file mode 100644 index 1fc19d3fe3f..00000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/internal/gen.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package internal // import "go.opentelemetry.io/otel/sdk/internal" - -//go:generate gotmpl --body=../../internal/shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go -//go:generate gotmpl --body=../../internal/shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go -//go:generate gotmpl --body=../../internal/shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go - -//go:generate gotmpl --body=../../internal/shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go -//go:generate gotmpl --body=../../internal/shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go -//go:generate gotmpl --body=../../internal/shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go -//go:generate gotmpl --body=../../internal/shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go -//go:generate gotmpl --body=../../internal/shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/sdk/internal/matchers\"}" --out=internaltest/harness.go -//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go -//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go -//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go -//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go b/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go deleted file mode 100644 index a990092f9d1..00000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package internal // import "go.opentelemetry.io/otel/sdk/internal" - -import "time" - -// MonotonicEndTime returns the end time at present -// but offset from start, monotonically. -// -// The monotonic clock is used in subtractions hence -// the duration since start added back to start gives -// end as a monotonic time. -// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks -func MonotonicEndTime(start time.Time) time.Time { - return start.Add(time.Since(start)) -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md new file mode 100644 index 00000000000..fab61647c2d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md @@ -0,0 +1,46 @@ +# Experimental Features + +The SDK contains features that have not yet stabilized in the OpenTelemetry specification. +These features are added to the OpenTelemetry Go SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback. + +These feature may change in backwards incompatible ways as feedback is applied. 
+See the [Compatibility and Stability](#compatibility-and-stability) section for more information. + +## Features + +- [Resource](#resource) + +### Resource + +[OpenTelemetry resource semantic conventions] include many attribute definitions that are defined as experimental. +To have experimental semantic conventions be added by [resource detectors] set the `OTEL_GO_X_RESOURCE` environment variable. +The value set must be the case-insensitive string of `"true"` to enable the feature. +All other values are ignored. + + + +[OpenTelemetry resource semantic conventions]: https://opentelemetry.io/docs/specs/semconv/resource/ +[resource detectors]: https://pkg.go.dev/go.opentelemetry.io/otel/sdk/resource#Detector + +#### Examples + +Enable experimental resource semantic conventions. + +```console +export OTEL_GO_X_RESOURCE=true +``` + +Disable experimental resource semantic conventions. + +```console +unset OTEL_GO_X_RESOURCE +``` + +## Compatibility and Stability + +Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../VERSIONING.md). +These features may be removed or modified in successive version releases, including patch versions. + +When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release. +There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version. +If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support. diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go new file mode 100644 index 00000000000..68d296cbed3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go @@ -0,0 +1,66 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package x contains support for OTel SDK experimental features. +// +// This package should only be used for features defined in the specification. +// It should not be used for experiments or new project ideas. +package x // import "go.opentelemetry.io/otel/sdk/internal/x" + +import ( + "os" + "strings" +) + +// Resource is an experimental feature flag that defines if resource detectors +// should be included experimental semantic conventions. +// +// To enable this feature set the OTEL_GO_X_RESOURCE environment variable +// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" +// will also enable this). +var Resource = newFeature("RESOURCE", func(v string) (string, bool) { + if strings.ToLower(v) == "true" { + return v, true + } + return "", false +}) + +// Feature is an experimental feature control flag. It provides a uniform way +// to interact with these feature flags and parse their values. +type Feature[T any] struct { + key string + parse func(v string) (T, bool) +} + +func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { + const envKeyRoot = "OTEL_GO_X_" + return Feature[T]{ + key: envKeyRoot + suffix, + parse: parse, + } +} + +// Key returns the environment variable key that needs to be set to enable the +// feature. +func (f Feature[T]) Key() string { return f.key } + +// Lookup returns the user configured value for the feature and true if the +// user has enabled the feature. Otherwise, if the feature is not enabled, a +// zero-value and false are returned. 
+func (f Feature[T]) Lookup() (v T, ok bool) { + // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value + // + // > The SDK MUST interpret an empty value of an environment variable the + // > same way as when the variable is unset. + vRaw := os.Getenv(f.key) + if vRaw == "" { + return v, ok + } + return f.parse(vRaw) +} + +// Enabled returns if the feature is enabled. +func (f Feature[T]) Enabled() bool { + _, ok := f.Lookup() + return ok +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/config.go b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go index 9a41f94e979..bbe7bf671fd 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/config.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go @@ -122,7 +122,7 @@ func WithReader(r Reader) Option { }) } -// WithView associates views a MeterProvider. +// WithView associates views with a MeterProvider. // // Views are appended to existing ones in a MeterProvider if this option is // used multiple times. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go index 9155c242c71..82619da78ec 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go @@ -19,67 +19,63 @@ import ( // Note: This will only return non-nil values when the experimental exemplar // feature is enabled and the OTEL_METRICS_EXEMPLAR_FILTER environment variable // is not set to always_off. -func reservoirFunc[N int64 | float64](agg Aggregation) func() exemplar.Reservoir[N] { +func reservoirFunc[N int64 | float64](agg Aggregation) func() exemplar.FilteredReservoir[N] { if !x.Exemplars.Enabled() { return nil } - - // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/metrics/sdk.md#exemplar-defaults - resF := func() func() exemplar.Reservoir[N] { - // Explicit bucket histogram aggregation with more than 1 bucket will - // use AlignedHistogramBucketExemplarReservoir. - a, ok := agg.(AggregationExplicitBucketHistogram) - if ok && len(a.Boundaries) > 0 { - cp := slices.Clone(a.Boundaries) - return func() exemplar.Reservoir[N] { - bounds := cp - return exemplar.Histogram[N](bounds) - } - } - - var n int - if a, ok := agg.(AggregationBase2ExponentialHistogram); ok { - // Base2 Exponential Histogram Aggregation SHOULD use a - // SimpleFixedSizeExemplarReservoir with a reservoir equal to the - // smaller of the maximum number of buckets configured on the - // aggregation or twenty (e.g. min(20, max_buckets)). - n = int(a.MaxSize) - if n > 20 { - n = 20 - } - } else { - // https://github.com/open-telemetry/opentelemetry-specification/blob/e94af89e3d0c01de30127a0f423e912f6cda7bed/specification/metrics/sdk.md#simplefixedsizeexemplarreservoir - // This Exemplar reservoir MAY take a configuration parameter for - // the size of the reservoir. If no size configuration is - // provided, the default size MAY be the number of possible - // concurrent threads (e.g. number of CPUs) to help reduce - // contention. Otherwise, a default size of 1 SHOULD be used. - n = runtime.NumCPU() - if n < 1 { - // Should never be the case, but be defensive. 
- n = 1 - } - } - - return func() exemplar.Reservoir[N] { - return exemplar.FixedSize[N](n) - } - } - // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/configuration/sdk-environment-variables.md#exemplar const filterEnvKey = "OTEL_METRICS_EXEMPLAR_FILTER" + var filter exemplar.Filter + switch os.Getenv(filterEnvKey) { case "always_on": - return resF() + filter = exemplar.AlwaysOnFilter case "always_off": - return exemplar.Drop[N] + return exemplar.Drop case "trace_based": fallthrough default: - newR := resF() - return func() exemplar.Reservoir[N] { - return exemplar.SampledFilter(newR()) + filter = exemplar.SampledFilter + } + + // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/metrics/sdk.md#exemplar-defaults + // Explicit bucket histogram aggregation with more than 1 bucket will + // use AlignedHistogramBucketExemplarReservoir. + a, ok := agg.(AggregationExplicitBucketHistogram) + if ok && len(a.Boundaries) > 0 { + cp := slices.Clone(a.Boundaries) + return func() exemplar.FilteredReservoir[N] { + bounds := cp + return exemplar.NewFilteredReservoir[N](filter, exemplar.Histogram(bounds)) } } + + var n int + if a, ok := agg.(AggregationBase2ExponentialHistogram); ok { + // Base2 Exponential Histogram Aggregation SHOULD use a + // SimpleFixedSizeExemplarReservoir with a reservoir equal to the + // smaller of the maximum number of buckets configured on the + // aggregation or twenty (e.g. min(20, max_buckets)). + n = int(a.MaxSize) + if n > 20 { + n = 20 + } + } else { + // https://github.com/open-telemetry/opentelemetry-specification/blob/e94af89e3d0c01de30127a0f423e912f6cda7bed/specification/metrics/sdk.md#simplefixedsizeexemplarreservoir + // This Exemplar reservoir MAY take a configuration parameter for + // the size of the reservoir. If no size configuration is + // provided, the default size MAY be the number of possible + // concurrent threads (e.g. number of CPUs) to help reduce + // contention. Otherwise, a default size of 1 SHOULD be used. + n = runtime.NumCPU() + if n < 1 { + // Should never be the case, but be defensive. + n = 1 + } + } + + return func() exemplar.FilteredReservoir[N] { + return exemplar.NewFilteredReservoir[N](filter, exemplar.FixedSize(n)) + } } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go index fa8ce053b6c..b52a330b3bc 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go @@ -18,10 +18,7 @@ import ( "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" ) -var ( - zeroInstrumentKind InstrumentKind - zeroScope instrumentation.Scope -) +var zeroScope instrumentation.Scope // InstrumentKind is the identifier of a group of instruments that all // performing the same function. @@ -30,28 +27,32 @@ type InstrumentKind uint8 const ( // instrumentKindUndefined is an undefined instrument kind, it should not // be used by any initialized type. - instrumentKindUndefined InstrumentKind = iota // nolint:deadcode,varcheck,unused + instrumentKindUndefined InstrumentKind = 0 // nolint:deadcode,varcheck,unused // InstrumentKindCounter identifies a group of instruments that record // increasing values synchronously with the code path they are measuring. 
- InstrumentKindCounter + InstrumentKindCounter InstrumentKind = 1 // InstrumentKindUpDownCounter identifies a group of instruments that // record increasing and decreasing values synchronously with the code path // they are measuring. - InstrumentKindUpDownCounter + InstrumentKindUpDownCounter InstrumentKind = 2 // InstrumentKindHistogram identifies a group of instruments that record a // distribution of values synchronously with the code path they are // measuring. - InstrumentKindHistogram + InstrumentKindHistogram InstrumentKind = 3 // InstrumentKindObservableCounter identifies a group of instruments that // record increasing values in an asynchronous callback. - InstrumentKindObservableCounter + InstrumentKindObservableCounter InstrumentKind = 4 // InstrumentKindObservableUpDownCounter identifies a group of instruments // that record increasing and decreasing values in an asynchronous // callback. - InstrumentKindObservableUpDownCounter + InstrumentKindObservableUpDownCounter InstrumentKind = 5 // InstrumentKindObservableGauge identifies a group of instruments that // record current values in an asynchronous callback. - InstrumentKindObservableGauge + InstrumentKindObservableGauge InstrumentKind = 6 + // InstrumentKindGauge identifies a group of instruments that record + // instantaneous values synchronously with the code path they are + // measuring. + InstrumentKindGauge InstrumentKind = 7 ) type nonComparable [0]func() // nolint: unused // This is indeed used. @@ -73,11 +74,11 @@ type Instrument struct { nonComparable // nolint: unused } -// empty returns if all fields of i are their zero-value. -func (i Instrument) empty() bool { +// IsEmpty returns if all Instrument fields are their zero-value. +func (i Instrument) IsEmpty() bool { return i.Name == "" && i.Description == "" && - i.Kind == zeroInstrumentKind && + i.Kind == instrumentKindUndefined && i.Unit == "" && i.Scope == zeroScope } @@ -108,7 +109,7 @@ func (i Instrument) matchesDescription(other Instrument) bool { // matchesKind returns true if the Kind of i is its zero-value or it equals the // Kind of other, otherwise false. 
func (i Instrument) matchesKind(other Instrument) bool { - return i.Kind == zeroInstrumentKind || i.Kind == other.Kind + return i.Kind == instrumentKindUndefined || i.Kind == other.Kind } // matchesUnit returns true if the Unit of i is its zero-value or it equals the @@ -175,12 +176,14 @@ type int64Inst struct { embedded.Int64Counter embedded.Int64UpDownCounter embedded.Int64Histogram + embedded.Int64Gauge } var ( _ metric.Int64Counter = (*int64Inst)(nil) _ metric.Int64UpDownCounter = (*int64Inst)(nil) _ metric.Int64Histogram = (*int64Inst)(nil) + _ metric.Int64Gauge = (*int64Inst)(nil) ) func (i *int64Inst) Add(ctx context.Context, val int64, opts ...metric.AddOption) { @@ -205,12 +208,14 @@ type float64Inst struct { embedded.Float64Counter embedded.Float64UpDownCounter embedded.Float64Histogram + embedded.Float64Gauge } var ( _ metric.Float64Counter = (*float64Inst)(nil) _ metric.Float64UpDownCounter = (*float64Inst)(nil) _ metric.Float64Histogram = (*float64Inst)(nil) + _ metric.Float64Gauge = (*float64Inst)(nil) ) func (i *float64Inst) Add(ctx context.Context, val float64, opts ...metric.AddOption) { diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go b/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go index d5f9e982c2b..25ea6244e57 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go @@ -15,11 +15,12 @@ func _() { _ = x[InstrumentKindObservableCounter-4] _ = x[InstrumentKindObservableUpDownCounter-5] _ = x[InstrumentKindObservableGauge-6] + _ = x[InstrumentKindGauge-7] } -const _InstrumentKind_name = "instrumentKindUndefinedCounterUpDownCounterHistogramObservableCounterObservableUpDownCounterObservableGauge" +const _InstrumentKind_name = "instrumentKindUndefinedCounterUpDownCounterHistogramObservableCounterObservableUpDownCounterObservableGaugeGauge" -var _InstrumentKind_index = [...]uint8{0, 23, 30, 43, 52, 69, 92, 107} +var _InstrumentKind_index = [...]uint8{0, 23, 30, 43, 52, 69, 92, 107, 112} func (i InstrumentKind) String() string { if i >= InstrumentKind(len(_InstrumentKind_index)-1) { diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go index aa5229b09e4..b18ee719bd1 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go @@ -39,7 +39,7 @@ type Builder[N int64 | float64] struct { // // If this is not provided a default factory function that returns an // exemplar.Drop reservoir will be used. - ReservoirFunc func() exemplar.Reservoir[N] + ReservoirFunc func() exemplar.FilteredReservoir[N] // AggregationLimit is the cardinality limit of measurement attributes. 
Any // measurement for new attributes once the limit has been reached will be // aggregated into a single aggregate for the "otel.metric.overflow" @@ -50,12 +50,12 @@ type Builder[N int64 | float64] struct { AggregationLimit int } -func (b Builder[N]) resFunc() func() exemplar.Reservoir[N] { +func (b Builder[N]) resFunc() func() exemplar.FilteredReservoir[N] { if b.ReservoirFunc != nil { return b.ReservoirFunc } - return exemplar.Drop[N] + return exemplar.Drop } type fltrMeasure[N int64 | float64] func(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) @@ -74,21 +74,26 @@ func (b Builder[N]) filter(f fltrMeasure[N]) Measure[N] { } // LastValue returns a last-value aggregate function input and output. -// -// The Builder.Temporality is ignored and delta is use always. func (b Builder[N]) LastValue() (Measure[N], ComputeAggregation) { - // Delta temporality is the only temporality that makes semantic sense for - // a last-value aggregate. lv := newLastValue[N](b.AggregationLimit, b.resFunc()) + switch b.Temporality { + case metricdata.DeltaTemporality: + return b.filter(lv.measure), lv.delta + default: + return b.filter(lv.measure), lv.cumulative + } +} - return b.filter(lv.measure), func(dest *metricdata.Aggregation) int { - // Ignore if dest is not a metricdata.Gauge. The chance for memory - // reuse of the DataPoints is missed (better luck next time). - gData, _ := (*dest).(metricdata.Gauge[N]) - lv.computeAggregation(&gData.DataPoints) - *dest = gData - - return len(gData.DataPoints) +// PrecomputedLastValue returns a last-value aggregate function input and +// output. The aggregation returned from the returned ComputeAggregation +// function will always only return values from the previous collection cycle. +func (b Builder[N]) PrecomputedLastValue() (Measure[N], ComputeAggregation) { + lv := newPrecomputedLastValue[N](b.AggregationLimit, b.resFunc()) + switch b.Temporality { + case metricdata.DeltaTemporality: + return b.filter(lv.measure), lv.delta + default: + return b.filter(lv.measure), lv.cumulative } } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go new file mode 100644 index 00000000000..170ae8e58e2 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go @@ -0,0 +1,42 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + +import ( + "sync" + + "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +var exemplarPool = sync.Pool{ + New: func() any { return new([]exemplar.Exemplar) }, +} + +func collectExemplars[N int64 | float64](out *[]metricdata.Exemplar[N], f func(*[]exemplar.Exemplar)) { + dest := exemplarPool.Get().(*[]exemplar.Exemplar) + defer func() { + *dest = (*dest)[:0] + exemplarPool.Put(dest) + }() + + *dest = reset(*dest, len(*out), cap(*out)) + + f(dest) + + *out = reset(*out, len(*dest), cap(*dest)) + for i, e := range *dest { + (*out)[i].FilteredAttributes = e.FilteredAttributes + (*out)[i].Time = e.Time + (*out)[i].SpanID = e.SpanID + (*out)[i].TraceID = e.TraceID + + switch e.Value.Type() { + case exemplar.Int64ValueType: + (*out)[i].Value = N(e.Value.Int64()) + case exemplar.Float64ValueType: + (*out)[i].Value = N(e.Value.Float64()) + } + } +} diff --git 
a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go index a6629ee3125..c9c7e8f62a9 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go @@ -31,7 +31,7 @@ const ( // expoHistogramDataPoint is a single data point in an exponential histogram. type expoHistogramDataPoint[N int64 | float64] struct { attrs attribute.Set - res exemplar.Reservoir[N] + res exemplar.FilteredReservoir[N] count uint64 min N @@ -282,7 +282,7 @@ func (b *expoBuckets) downscale(delta int) { // newExponentialHistogram returns an Aggregator that summarizes a set of // measurements as an exponential histogram. Each histogram is scoped by attributes // and the aggregation cycle the measurements were made in. -func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func() exemplar.Reservoir[N]) *expoHistogram[N] { +func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *expoHistogram[N] { return &expoHistogram[N]{ noSum: noSum, noMinMax: noMinMax, @@ -305,7 +305,7 @@ type expoHistogram[N int64 | float64] struct { maxSize int maxScale int - newRes func() exemplar.Reservoir[N] + newRes func() exemplar.FilteredReservoir[N] limit limiter[*expoHistogramDataPoint[N]] values map[attribute.Distinct]*expoHistogramDataPoint[N] valuesMu sync.Mutex @@ -319,8 +319,6 @@ func (e *expoHistogram[N]) measure(ctx context.Context, value N, fltrAttr attrib return } - t := now() - e.valuesMu.Lock() defer e.valuesMu.Unlock() @@ -333,7 +331,7 @@ func (e *expoHistogram[N]) measure(ctx context.Context, value N, fltrAttr attrib e.values[attr.Equivalent()] = v } v.record(value) - v.res.Offer(ctx, t, value, droppedAttr) + v.res.Offer(ctx, value, droppedAttr) } func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int { @@ -376,7 +374,7 @@ func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int { hDPts[i].Max = metricdata.NewExtrema(val.max) } - val.res.Collect(&hDPts[i].Exemplars) + collectExemplars(&hDPts[i].Exemplars, val.res.Collect) i++ } @@ -429,7 +427,7 @@ func (e *expoHistogram[N]) cumulative(dest *metricdata.Aggregation) int { hDPts[i].Max = metricdata.NewExtrema(val.max) } - val.res.Collect(&hDPts[i].Exemplars) + collectExemplars(&hDPts[i].Exemplars, val.res.Collect) i++ // TODO (#3006): This will use an unbounded amount of memory if there diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go index 911d7c18691..ade0941f5f5 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go @@ -17,7 +17,7 @@ import ( type buckets[N int64 | float64] struct { attrs attribute.Set - res exemplar.Reservoir[N] + res exemplar.FilteredReservoir[N] counts []uint64 count uint64 @@ -48,13 +48,13 @@ type histValues[N int64 | float64] struct { noSum bool bounds []float64 - newRes func() exemplar.Reservoir[N] + newRes func() exemplar.FilteredReservoir[N] limit limiter[*buckets[N]] values map[attribute.Distinct]*buckets[N] valuesMu sync.Mutex } -func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func() 
exemplar.Reservoir[N]) *histValues[N] { +func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *histValues[N] { // The responsibility of keeping all buckets correctly associated with the // passed boundaries is ultimately this type's responsibility. Make a copy // here so we can always guarantee this. Or, in the case of failure, have @@ -80,8 +80,6 @@ func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute // (s.bounds[len(s.bounds)-1], +∞). idx := sort.SearchFloat64s(s.bounds, float64(value)) - t := now() - s.valuesMu.Lock() defer s.valuesMu.Unlock() @@ -106,12 +104,12 @@ func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute if !s.noSum { b.sum(value) } - b.res.Offer(ctx, t, value, droppedAttr) + b.res.Offer(ctx, value, droppedAttr) } // newHistogram returns an Aggregator that summarizes a set of measurements as // an histogram. -func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func() exemplar.Reservoir[N]) *histogram[N] { +func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *histogram[N] { return &histogram[N]{ histValues: newHistValues[N](boundaries, noSum, limit, r), noMinMax: noMinMax, @@ -163,7 +161,7 @@ func (s *histogram[N]) delta(dest *metricdata.Aggregation) int { hDPts[i].Max = metricdata.NewExtrema(val.max) } - val.res.Collect(&hDPts[i].Exemplars) + collectExemplars(&hDPts[i].Exemplars, val.res.Collect) i++ } @@ -219,7 +217,7 @@ func (s *histogram[N]) cumulative(dest *metricdata.Aggregation) int { hDPts[i].Max = metricdata.NewExtrema(val.max) } - val.res.Collect(&hDPts[i].Exemplars) + collectExemplars(&hDPts[i].Exemplars, val.res.Collect) i++ // TODO (#3006): This will use an unbounded amount of memory if there diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go index 73cf98c7599..c359368403e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go @@ -15,17 +15,17 @@ import ( // datapoint is timestamped measurement data. type datapoint[N int64 | float64] struct { - attrs attribute.Set - timestamp time.Time - value N - res exemplar.Reservoir[N] + attrs attribute.Set + value N + res exemplar.FilteredReservoir[N] } -func newLastValue[N int64 | float64](limit int, r func() exemplar.Reservoir[N]) *lastValue[N] { +func newLastValue[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *lastValue[N] { return &lastValue[N]{ newRes: r, limit: newLimiter[datapoint[N]](limit), values: make(map[attribute.Distinct]datapoint[N]), + start: now(), } } @@ -33,14 +33,13 @@ func newLastValue[N int64 | float64](limit int, r func() exemplar.Reservoir[N]) type lastValue[N int64 | float64] struct { sync.Mutex - newRes func() exemplar.Reservoir[N] + newRes func() exemplar.FilteredReservoir[N] limit limiter[datapoint[N]] values map[attribute.Distinct]datapoint[N] + start time.Time } func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) { - t := now() - s.Lock() defer s.Unlock() @@ -51,30 +50,113 @@ func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute. 
} d.attrs = attr - d.timestamp = t d.value = value - d.res.Offer(ctx, t, value, droppedAttr) + d.res.Offer(ctx, value, droppedAttr) s.values[attr.Equivalent()] = d } -func (s *lastValue[N]) computeAggregation(dest *[]metricdata.DataPoint[N]) { +func (s *lastValue[N]) delta(dest *metricdata.Aggregation) int { + t := now() + // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of + // the DataPoints is missed (better luck next time). + gData, _ := (*dest).(metricdata.Gauge[N]) + s.Lock() defer s.Unlock() + n := s.copyDpts(&gData.DataPoints, t) + // Do not report stale values. + clear(s.values) + // Update start time for delta temporality. + s.start = t + + *dest = gData + + return n +} + +func (s *lastValue[N]) cumulative(dest *metricdata.Aggregation) int { + t := now() + // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of + // the DataPoints is missed (better luck next time). + gData, _ := (*dest).(metricdata.Gauge[N]) + + s.Lock() + defer s.Unlock() + + n := s.copyDpts(&gData.DataPoints, t) + // TODO (#3006): This will use an unbounded amount of memory if there + // are unbounded number of attribute sets being aggregated. Attribute + // sets that become "stale" need to be forgotten so this will not + // overload the system. + *dest = gData + + return n +} + +// copyDpts copies the datapoints held by s into dest. The number of datapoints +// copied is returned. +func (s *lastValue[N]) copyDpts(dest *[]metricdata.DataPoint[N], t time.Time) int { n := len(s.values) *dest = reset(*dest, n, n) var i int for _, v := range s.values { (*dest)[i].Attributes = v.attrs - // The event time is the only meaningful timestamp, StartTime is - // ignored. - (*dest)[i].Time = v.timestamp + (*dest)[i].StartTime = s.start + (*dest)[i].Time = t (*dest)[i].Value = v.value - v.res.Collect(&(*dest)[i].Exemplars) + collectExemplars(&(*dest)[i].Exemplars, v.res.Collect) i++ } + return n +} + +// newPrecomputedLastValue returns an aggregator that summarizes a set of +// observations as the last one made. +func newPrecomputedLastValue[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *precomputedLastValue[N] { + return &precomputedLastValue[N]{lastValue: newLastValue[N](limit, r)} +} + +// precomputedLastValue summarizes a set of observations as the last one made. +type precomputedLastValue[N int64 | float64] struct { + *lastValue[N] +} + +func (s *precomputedLastValue[N]) delta(dest *metricdata.Aggregation) int { + t := now() + // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of + // the DataPoints is missed (better luck next time). + gData, _ := (*dest).(metricdata.Gauge[N]) + + s.Lock() + defer s.Unlock() + + n := s.copyDpts(&gData.DataPoints, t) + // Do not report stale values. + clear(s.values) + // Update start time for delta temporality. + s.start = t + + *dest = gData + + return n +} + +func (s *precomputedLastValue[N]) cumulative(dest *metricdata.Aggregation) int { + t := now() + // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of + // the DataPoints is missed (better luck next time). + gData, _ := (*dest).(metricdata.Gauge[N]) + + s.Lock() + defer s.Unlock() + + n := s.copyDpts(&gData.DataPoints, t) // Do not report stale values. 
clear(s.values) + *dest = gData + + return n } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go index 7514b95e698..89136692260 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go @@ -15,19 +15,19 @@ import ( type sumValue[N int64 | float64] struct { n N - res exemplar.Reservoir[N] + res exemplar.FilteredReservoir[N] attrs attribute.Set } // valueMap is the storage for sums. type valueMap[N int64 | float64] struct { sync.Mutex - newRes func() exemplar.Reservoir[N] + newRes func() exemplar.FilteredReservoir[N] limit limiter[sumValue[N]] values map[attribute.Distinct]sumValue[N] } -func newValueMap[N int64 | float64](limit int, r func() exemplar.Reservoir[N]) *valueMap[N] { +func newValueMap[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *valueMap[N] { return &valueMap[N]{ newRes: r, limit: newLimiter[sumValue[N]](limit), @@ -36,8 +36,6 @@ func newValueMap[N int64 | float64](limit int, r func() exemplar.Reservoir[N]) * } func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) { - t := now() - s.Lock() defer s.Unlock() @@ -49,7 +47,7 @@ func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.S v.attrs = attr v.n += value - v.res.Offer(ctx, t, value, droppedAttr) + v.res.Offer(ctx, value, droppedAttr) s.values[attr.Equivalent()] = v } @@ -57,7 +55,7 @@ func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.S // newSum returns an aggregator that summarizes a set of measurements as their // arithmetic sum. Each sum is scoped by attributes and the aggregation cycle // the measurements were made in. -func newSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.Reservoir[N]) *sum[N] { +func newSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.FilteredReservoir[N]) *sum[N] { return &sum[N]{ valueMap: newValueMap[N](limit, r), monotonic: monotonic, @@ -94,7 +92,7 @@ func (s *sum[N]) delta(dest *metricdata.Aggregation) int { dPts[i].StartTime = s.start dPts[i].Time = t dPts[i].Value = val.n - val.res.Collect(&dPts[i].Exemplars) + collectExemplars(&dPts[i].Exemplars, val.res.Collect) i++ } // Do not report stale values. @@ -129,7 +127,7 @@ func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int { dPts[i].StartTime = s.start dPts[i].Time = t dPts[i].Value = value.n - value.res.Collect(&dPts[i].Exemplars) + collectExemplars(&dPts[i].Exemplars, value.res.Collect) // TODO (#3006): This will use an unbounded amount of memory if there // are unbounded number of attribute sets being aggregated. Attribute // sets that become "stale" need to be forgotten so this will not @@ -146,7 +144,7 @@ func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int { // newPrecomputedSum returns an aggregator that summarizes a set of // observatrions as their arithmetic sum. Each sum is scoped by attributes and // the aggregation cycle the measurements were made in. 
-func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.Reservoir[N]) *precomputedSum[N] { +func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.FilteredReservoir[N]) *precomputedSum[N] { return &precomputedSum[N]{ valueMap: newValueMap[N](limit, r), monotonic: monotonic, @@ -188,7 +186,7 @@ func (s *precomputedSum[N]) delta(dest *metricdata.Aggregation) int { dPts[i].StartTime = s.start dPts[i].Time = t dPts[i].Value = delta - value.res.Collect(&dPts[i].Exemplars) + collectExemplars(&dPts[i].Exemplars, value.res.Collect) newReported[key] = value.n i++ @@ -226,7 +224,7 @@ func (s *precomputedSum[N]) cumulative(dest *metricdata.Aggregation) int { dPts[i].StartTime = s.start dPts[i].Time = t dPts[i].Value = val.n - val.res.Collect(&dPts[i].Exemplars) + collectExemplars(&dPts[i].Exemplars, val.res.Collect) i++ } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go index 729c2793ee1..5a0f39ae147 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go @@ -5,21 +5,19 @@ package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exempla import ( "context" - "time" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/metric/metricdata" ) -// Drop returns a [Reservoir] that drops all measurements it is offered. -func Drop[N int64 | float64]() Reservoir[N] { return &dropRes[N]{} } +// Drop returns a [FilteredReservoir] that drops all measurements it is offered. +func Drop[N int64 | float64]() FilteredReservoir[N] { return &dropRes[N]{} } type dropRes[N int64 | float64] struct{} // Offer does nothing, all measurements offered will be dropped. -func (r *dropRes[N]) Offer(context.Context, time.Time, N, []attribute.KeyValue) {} +func (r *dropRes[N]) Offer(context.Context, N, []attribute.KeyValue) {} // Collect resets dest. No exemplars will ever be returned. -func (r *dropRes[N]) Collect(dest *[]metricdata.Exemplar[N]) { +func (r *dropRes[N]) Collect(dest *[]Exemplar) { *dest = (*dest)[:0] } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go new file mode 100644 index 00000000000..fcaa6a4697c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go @@ -0,0 +1,29 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// Exemplar is a measurement sampled from a timeseries providing a typical +// example. +type Exemplar struct { + // FilteredAttributes are the attributes recorded with the measurement but + // filtered out of the timeseries' aggregated data. + FilteredAttributes []attribute.KeyValue + // Time is the time when the measurement was recorded. + Time time.Time + // Value is the measured value. + Value Value + // SpanID is the ID of the span that was active during the measurement. If + // no span was active or the span was not sampled this will be empty. + SpanID []byte `json:",omitempty"` + // TraceID is the ID of the trace the active span belonged to during the + // measurement. If no span was active or the span was not sampled this will + // be empty. 
+ TraceID []byte `json:",omitempty"` +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go index 53c86d5cc24..152a069a09e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go @@ -5,25 +5,25 @@ package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exempla import ( "context" - "time" - "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" ) -// SampledFilter returns a [Reservoir] wrapping r that will only offer measurements -// to r if the passed context associated with the measurement contains a sampled -// [go.opentelemetry.io/otel/trace.SpanContext]. -func SampledFilter[N int64 | float64](r Reservoir[N]) Reservoir[N] { - return filtered[N]{Reservoir: r} -} +// Filter determines if a measurement should be offered. +// +// The passed ctx needs to contain any baggage or span that were active +// when the measurement was made. This information may be used by the +// Reservoir in making a sampling decision. +type Filter func(context.Context) bool -type filtered[N int64 | float64] struct { - Reservoir[N] +// SampledFilter is a [Filter] that will only offer measurements +// if the passed context associated with the measurement contains a sampled +// [go.opentelemetry.io/otel/trace.SpanContext]. +func SampledFilter(ctx context.Context) bool { + return trace.SpanContextFromContext(ctx).IsSampled() } -func (f filtered[N]) Offer(ctx context.Context, t time.Time, n N, a []attribute.KeyValue) { - if trace.SpanContextFromContext(ctx).IsSampled() { - f.Reservoir.Offer(ctx, t, n, a) - } +// AlwaysOnFilter is a [Filter] that always offers measurements. +func AlwaysOnFilter(ctx context.Context) bool { + return true } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go new file mode 100644 index 00000000000..9fedfa4be68 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go @@ -0,0 +1,49 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// FilteredReservoir wraps a [Reservoir] with a filter. +type FilteredReservoir[N int64 | float64] interface { + // Offer accepts the parameters associated with a measurement. The + // parameters will be stored as an exemplar if the filter decides to + // sample the measurement. + // + // The passed ctx needs to contain any baggage or span that were active + // when the measurement was made. This information may be used by the + // Reservoir in making a sampling decision. + Offer(ctx context.Context, val N, attr []attribute.KeyValue) + // Collect returns all the held exemplars in the reservoir. + Collect(dest *[]Exemplar) +} + +// filteredReservoir handles the pre-sampled exemplar of measurements made. +type filteredReservoir[N int64 | float64] struct { + filter Filter + reservoir Reservoir +} + +// NewFilteredReservoir creates a [FilteredReservoir] which only offers values +// that are allowed by the filter. 
+func NewFilteredReservoir[N int64 | float64](f Filter, r Reservoir) FilteredReservoir[N] { + return &filteredReservoir[N]{ + filter: f, + reservoir: r, + } +} + +func (f *filteredReservoir[N]) Offer(ctx context.Context, val N, attr []attribute.KeyValue) { + if f.filter(ctx) { + // only record the current time if we are sampling this measurement. + f.reservoir.Offer(ctx, time.Now(), NewValue(val), attr) + } +} + +func (f *filteredReservoir[N]) Collect(dest *[]Exemplar) { f.reservoir.Collect(dest) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go index 463c8a7d313..a6ff86d0271 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go @@ -17,21 +17,30 @@ import ( // by bounds. // // The passed bounds will be sorted by this function. -func Histogram[N int64 | float64](bounds []float64) Reservoir[N] { +func Histogram(bounds []float64) Reservoir { slices.Sort(bounds) - return &histRes[N]{ + return &histRes{ bounds: bounds, - storage: newStorage[N](len(bounds) + 1), + storage: newStorage(len(bounds) + 1), } } -type histRes[N int64 | float64] struct { - *storage[N] +type histRes struct { + *storage // bounds are bucket bounds in ascending order. bounds []float64 } -func (r *histRes[N]) Offer(ctx context.Context, t time.Time, n N, a []attribute.KeyValue) { - r.store[sort.SearchFloat64s(r.bounds, float64(n))] = newMeasurement(ctx, t, n, a) +func (r *histRes) Offer(ctx context.Context, t time.Time, v Value, a []attribute.KeyValue) { + var x float64 + switch v.Type() { + case Int64ValueType: + x = float64(v.Int64()) + case Float64ValueType: + x = v.Float64() + default: + panic("unknown value type") + } + r.store[sort.SearchFloat64s(r.bounds, x)] = newMeasurement(ctx, t, v, a) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go index 923953cb75b..199a2608f71 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go @@ -7,17 +7,21 @@ import ( "context" "math" "math/rand" + "sync" "time" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/metric/metricdata" ) -// rng is used to make sampling decisions. -// -// Do not use crypto/rand. There is no reason for the decrease in performance -// given this is not a security sensitive decision. -var rng = rand.New(rand.NewSource(time.Now().UnixNano())) +var ( + // rng is used to make sampling decisions. + // + // Do not use crypto/rand. There is no reason for the decrease in performance + // given this is not a security sensitive decision. + rng = rand.New(rand.NewSource(time.Now().UnixNano())) + // Ensure concurrent safe access to rng and its underlying source. + rngMu sync.Mutex +) // random returns, as a float64, a uniform pseudo-random number in the open // interval (0.0,1.0). @@ -39,6 +43,9 @@ func random() float64 { // // There are likely many other methods to explore here as well. + rngMu.Lock() + defer rngMu.Unlock() + f := rng.Float64() for f == 0 { f = rng.Float64() @@ -50,14 +57,14 @@ func random() float64 { // are k or less measurements made, the Reservoir will sample each one. If // there are more than k, the Reservoir will then randomly sample all // additional measurement with a decreasing probability.
-func FixedSize[N int64 | float64](k int) Reservoir[N] { - r := &randRes[N]{storage: newStorage[N](k)} +func FixedSize(k int) Reservoir { + r := &randRes{storage: newStorage(k)} r.reset() return r } -type randRes[N int64 | float64] struct { - *storage[N] +type randRes struct { + *storage // count is the number of measurement seen. count int64 @@ -69,7 +76,7 @@ type randRes[N int64 | float64] struct { w float64 } -func (r *randRes[N]) Offer(ctx context.Context, t time.Time, n N, a []attribute.KeyValue) { +func (r *randRes) Offer(ctx context.Context, t time.Time, n Value, a []attribute.KeyValue) { // The following algorithm is "Algorithm L" from Li, Kim-Hung (4 December // 1994). "Reservoir-Sampling Algorithms of Time Complexity // O(n(1+log(N/n)))". ACM Transactions on Mathematical Software. 20 (4): @@ -125,7 +132,7 @@ func (r *randRes[N]) Offer(ctx context.Context, t time.Time, n N, a []attribute. } // reset resets r to the initial state. -func (r *randRes[N]) reset() { +func (r *randRes) reset() { // This resets the number of exemplars known. r.count = 0 // Random index inserts should only happen after the storage is full. @@ -147,7 +154,7 @@ func (r *randRes[N]) reset() { // advance updates the count at which the offered measurement will overwrite an // existing exemplar. -func (r *randRes[N]) advance() { +func (r *randRes) advance() { // Calculate the next value in the random number series. // // The current value of r.w is based on the max of a distribution of random @@ -174,7 +181,7 @@ func (r *randRes[N]) advance() { r.next += int64(math.Log(random())/math.Log(1-r.w)) + 1 } -func (r *randRes[N]) Collect(dest *[]metricdata.Exemplar[N]) { +func (r *randRes) Collect(dest *[]Exemplar) { r.storage.Collect(dest) // Call reset here even though it will reset r.count and restart the random // number series. This will persist any old exemplars as long as no new diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go index a663aa22970..80fa59554f2 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go @@ -8,11 +8,10 @@ import ( "time" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/metric/metricdata" ) // Reservoir holds the sampled exemplar of measurements made. -type Reservoir[N int64 | float64] interface { +type Reservoir interface { // Offer accepts the parameters associated with a measurement. The // parameters will be stored as an exemplar if the Reservoir decides to // sample the measurement. @@ -24,10 +23,10 @@ type Reservoir[N int64 | float64] interface { // The time t is the time when the measurement was made. The val and attr // parameters are the value and dropped (filtered) attributes of the // measurement respectively. - Offer(ctx context.Context, t time.Time, val N, attr []attribute.KeyValue) + Offer(ctx context.Context, t time.Time, val Value, attr []attribute.KeyValue) // Collect returns all the held exemplars. // // The Reservoir state is preserved after this call. 
- Collect(dest *[]metricdata.Exemplar[N]) + Collect(dest *[]Exemplar) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go index 994ab10736b..10b2976f796 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go @@ -8,27 +8,26 @@ import ( "time" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/trace" ) // storage is an exemplar storage for [Reservoir] implementations. -type storage[N int64 | float64] struct { +type storage struct { // store are the measurements sampled. // // This does not use []metricdata.Exemplar because it potentially would // require an allocation for trace and span IDs in the hot path of Offer. - store []measurement[N] + store []measurement } -func newStorage[N int64 | float64](n int) *storage[N] { - return &storage[N]{store: make([]measurement[N], n)} +func newStorage(n int) *storage { + return &storage{store: make([]measurement, n)} } // Collect returns all the held exemplars. // // The Reservoir state is preserved after this call. -func (r *storage[N]) Collect(dest *[]metricdata.Exemplar[N]) { +func (r *storage) Collect(dest *[]Exemplar) { *dest = reset(*dest, len(r.store), len(r.store)) var n int for _, m := range r.store { @@ -43,13 +42,13 @@ func (r *storage[N]) Collect(dest *[]metricdata.Exemplar[N]) { } // measurement is a measurement made by a telemetry system. -type measurement[N int64 | float64] struct { +type measurement struct { // FilteredAttributes are the attributes dropped during the measurement. FilteredAttributes []attribute.KeyValue // Time is the time when the measurement was made. Time time.Time // Value is the value of the measurement. - Value N + Value Value // SpanContext is the SpanContext active when a measurement was made. SpanContext trace.SpanContext @@ -57,8 +56,8 @@ type measurement[N int64 | float64] struct { } // newMeasurement returns a new non-empty Measurement. -func newMeasurement[N int64 | float64](ctx context.Context, ts time.Time, v N, droppedAttr []attribute.KeyValue) measurement[N] { - return measurement[N]{ +func newMeasurement(ctx context.Context, ts time.Time, v Value, droppedAttr []attribute.KeyValue) measurement { + return measurement{ FilteredAttributes: droppedAttr, Time: ts, Value: v, @@ -67,8 +66,8 @@ func newMeasurement[N int64 | float64](ctx context.Context, ts time.Time, v N, d } } -// Exemplar returns m as a [metricdata.Exemplar]. -func (m measurement[N]) Exemplar(dest *metricdata.Exemplar[N]) { +// Exemplar returns m as an [Exemplar]. +func (m measurement) Exemplar(dest *Exemplar) { dest.FilteredAttributes = m.FilteredAttributes dest.Time = m.Time dest.Value = m.Value diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go new file mode 100644 index 00000000000..9daf27dc006 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go @@ -0,0 +1,57 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import "math" + +// ValueType identifies the type of value used in exemplar data. +type ValueType uint8 + +const ( + // UnknownValueType should not be used. It represents a misconfigured + // Value. 
+ UnknownValueType ValueType = 0 + // Int64ValueType represents a Value with int64 data. + Int64ValueType ValueType = 1 + // Float64ValueType represents a Value with float64 data. + Float64ValueType ValueType = 2 +) + +// Value is the value of data held by an exemplar. +type Value struct { + t ValueType + val uint64 +} + +// NewValue returns a new [Value] for the provided value. +func NewValue[N int64 | float64](value N) Value { + switch v := any(value).(type) { + case int64: + return Value{t: Int64ValueType, val: uint64(v)} + case float64: + return Value{t: Float64ValueType, val: math.Float64bits(v)} + } + return Value{} +} + +// Type returns the [ValueType] of data held by v. +func (v Value) Type() ValueType { return v.t } + +// Int64 returns the value of v as an int64. If the ValueType of v is not an +// Int64ValueType, 0 is returned. +func (v Value) Int64() int64 { + if v.t == Int64ValueType { + return int64(v.val) + } + return 0 +} + +// Float64 returns the value of v as an float64. If the ValueType of v is not +// an Float64ValueType, 0 is returned. +func (v Value) Float64() float64 { + if v.t == Float64ValueType { + return math.Float64frombits(v.val) + } + return 0 +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go index 7840c48647a..479b7610eb1 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go @@ -108,6 +108,21 @@ func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOpti return i, validateInstrumentName(name) } +// Int64Gauge returns a new instrument identified by name and configured +// with options. The instrument is used to synchronously record the +// distribution of int64 measurements during a computational operation. +func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { + cfg := metric.NewInt64GaugeConfig(options...) + const kind = InstrumentKindGauge + p := int64InstProvider{m} + i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit()) + if err != nil { + return i, err + } + + return i, validateInstrumentName(name) +} + // int64ObservableInstrument returns a new observable identified by the Instrument. // It registers callbacks for each reader's pipeline. func (m *meter) int64ObservableInstrument(id Instrument, callbacks []metric.Int64Callback) (int64Observable, error) { @@ -242,6 +257,21 @@ func (m *meter) Float64Histogram(name string, options ...metric.Float64Histogram return i, validateInstrumentName(name) } +// Float64Gauge returns a new instrument identified by name and configured +// with options. The instrument is used to synchronously record the +// distribution of float64 measurements during a computational operation. +func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { + cfg := metric.NewFloat64GaugeConfig(options...) + const kind = InstrumentKindGauge + p := float64InstProvider{m} + i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit()) + if err != nil { + return i, err + } + + return i, validateInstrumentName(name) +} + // float64ObservableInstrument returns a new observable identified by the Instrument. // It registers callbacks for each reader's pipeline. 
func (m *meter) float64ObservableInstrument(id Instrument, callbacks []metric.Float64Callback) (float64Observable, error) { diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go index 9cdd9384c51..67ee1b11a2e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go @@ -334,7 +334,7 @@ func (r *PeriodicReader) Shutdown(ctx context.Context) error { } sErr := r.exporter.Shutdown(ctx) - if err == nil || err == ErrReaderShutdown { + if err == nil || errors.Is(err, ErrReaderShutdown) { err = sErr } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go index c3e2d9cc012..823bf2fe3d2 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go @@ -447,7 +447,12 @@ func (i *inserter[N]) aggregateFunc(b aggregate.Builder[N], agg Aggregation, kin case AggregationDrop: // Return nil in and out to signify the drop aggregator. case AggregationLastValue: - meas, comp = b.LastValue() + switch kind { + case InstrumentKindGauge: + meas, comp = b.LastValue() + case InstrumentKindObservableGauge: + meas, comp = b.PrecomputedLastValue() + } case AggregationSum: switch kind { case InstrumentKindObservableCounter: @@ -464,7 +469,7 @@ func (i *inserter[N]) aggregateFunc(b aggregate.Builder[N], agg Aggregation, kin case AggregationExplicitBucketHistogram: var noSum bool switch kind { - case InstrumentKindUpDownCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge: + case InstrumentKindUpDownCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge, InstrumentKindGauge: // The sum should not be collected for any instrument that can make // negative measurements: // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#histogram-aggregations @@ -474,7 +479,7 @@ func (i *inserter[N]) aggregateFunc(b aggregate.Builder[N], agg Aggregation, kin case AggregationBase2ExponentialHistogram: var noSum bool switch kind { - case InstrumentKindUpDownCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge: + case InstrumentKindUpDownCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge, InstrumentKindGauge: // The sum should not be collected for any instrument that can make // negative measurements: // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#histogram-aggregations @@ -497,6 +502,7 @@ func (i *inserter[N]) aggregateFunc(b aggregate.Builder[N], agg Aggregation, kin // | Counter | ✓ | | ✓ | ✓ | ✓ | // | UpDownCounter | ✓ | | ✓ | ✓ | ✓ | // | Histogram | ✓ | | ✓ | ✓ | ✓ | +// | Gauge | ✓ | ✓ | | ✓ | ✓ | // | Observable Counter | ✓ | | ✓ | ✓ | ✓ | // | Observable UpDownCounter | ✓ | | ✓ | ✓ | ✓ | // | Observable Gauge | ✓ | ✓ | | ✓ | ✓ |. 
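The meter.go and pipeline.go hunks above wire the synchronous Gauge instrument kind into the SDK: Int64Gauge and Float64Gauge resolve to the last-value aggregation by default and, like the other gauge-style instruments, do not collect a sum under histogram aggregations. A minimal usage sketch against the go.opentelemetry.io/otel v1.28.0 modules vendored by this change; the meter, instrument, and attribute names are illustrative only:

package main

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	// The default pipeline maps InstrumentKindGauge to AggregationLastValue,
	// so only the latest recorded value per attribute set is exported.
	mp := sdkmetric.NewMeterProvider()
	defer func() { _ = mp.Shutdown(context.Background()) }()

	meter := mp.Meter("example/gauge")

	// Synchronous gauge enabled by this vendor bump (metric API >= v1.26, SDK v1.28).
	depth, err := meter.Int64Gauge("queue.depth")
	if err != nil {
		panic(err)
	}

	// Record the current value; the attribute key/value here are hypothetical.
	depth.Record(context.Background(), 42,
		metric.WithAttributes(attribute.String("queue", "jobs")))
}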
@@ -509,6 +515,7 @@ func isAggregatorCompatible(kind InstrumentKind, agg Aggregation) error { case InstrumentKindCounter, InstrumentKindUpDownCounter, InstrumentKindHistogram, + InstrumentKindGauge, InstrumentKindObservableCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge: @@ -526,7 +533,8 @@ func isAggregatorCompatible(kind InstrumentKind, agg Aggregation) error { return errIncompatibleAggregation } case AggregationLastValue: - if kind == InstrumentKindObservableGauge { + switch kind { + case InstrumentKindObservableGauge, InstrumentKindGauge: return nil } // TODO: review need for aggregation check after diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go index 9f900130043..a55f9a5372c 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go @@ -148,7 +148,7 @@ func DefaultAggregationSelector(ik InstrumentKind) Aggregation { switch ik { case InstrumentKindCounter, InstrumentKindUpDownCounter, InstrumentKindObservableCounter, InstrumentKindObservableUpDownCounter: return AggregationSum{} - case InstrumentKindObservableGauge: + case InstrumentKindObservableGauge, InstrumentKindGauge: return AggregationLastValue{} case InstrumentKindHistogram: return AggregationExplicitBucketHistogram{ @@ -166,7 +166,7 @@ type ReaderOption interface { ManualReaderOption } -// WithProducers registers producers as an external Producer of metric data +// WithProducer registers producers as an external Producer of metric data // for this Reader. func WithProducer(p Producer) ReaderOption { return producerOption{p: p} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go index e413f910d94..dade0a19a2b 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go @@ -5,5 +5,5 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric" // version is the current release version of the metric SDK in use. func version() string { - return "1.26.0" + return "1.28.0" } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/view.go b/vendor/go.opentelemetry.io/otel/sdk/metric/view.go index 11e334319d0..cd08c673248 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/view.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/view.go @@ -43,7 +43,7 @@ type View func(Instrument) (Stream, bool) // of the default. If you need to zero out an Stream field returned from a // View, create a View directly. 
func NewView(criteria Instrument, mask Stream) View { - if criteria.empty() { + if criteria.IsEmpty() { global.Error( errEmptyView, "dropping view", "mask", mask, diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go index 488cabc439a..6ac1cdbf7b4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -9,9 +9,11 @@ import ( "os" "path/filepath" + "github.com/google/uuid" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk" - semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" ) type ( @@ -36,6 +38,8 @@ type ( } defaultServiceNameDetector struct{} + + defaultServiceInstanceIDDetector struct{} ) var ( @@ -43,6 +47,7 @@ var ( _ Detector = host{} _ Detector = stringDetector{} _ Detector = defaultServiceNameDetector{} + _ Detector = defaultServiceInstanceIDDetector{} ) // Detect returns a *Resource that describes the OpenTelemetry SDK used. @@ -95,3 +100,19 @@ func (defaultServiceNameDetector) Detect(ctx context.Context) (*Resource, error) }, ).Detect(ctx) } + +// Detect implements Detector. +func (defaultServiceInstanceIDDetector) Detect(ctx context.Context) (*Resource, error) { + return StringDetector( + semconv.SchemaURL, + semconv.ServiceInstanceIDKey, + func() (string, error) { + version4Uuid, err := uuid.NewRandom() + if err != nil { + return "", err + } + + return version4Uuid.String(), nil + }, + ).Detect(ctx) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go index f3eeb45aca3..5ecd859a52d 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go @@ -11,7 +11,7 @@ import ( "os" "regexp" - semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" ) type containerIDProvider func() (string, error) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go index 7b221c703f7..813f0562424 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go @@ -12,7 +12,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" ) const ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go index 5acbec23d92..2d0f65498a0 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go @@ -8,7 +8,7 @@ import ( "errors" "strings" - semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" ) type hostIDProvider func() (string, error) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go index cf0165a645b..8a48ab4fa32 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go @@ -8,7 +8,7 @@ import ( "strings" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" ) type osDescriptionProvider func() (string, error) diff --git 
a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go index 8ba4e9a45cd..085fe68fd77 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go @@ -11,7 +11,7 @@ import ( "path/filepath" "runtime" - semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" ) type ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go index 9f1af3a236d..ad4b50df404 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go @@ -11,6 +11,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/internal/x" ) // Resource describes an entity about which identifying information @@ -218,11 +219,17 @@ func Empty() *Resource { func Default() *Resource { defaultResourceOnce.Do(func() { var err error - defaultResource, err = Detect( - context.Background(), + defaultDetectors := []Detector{ defaultServiceNameDetector{}, fromEnv{}, telemetrySDK{}, + } + if x.Resource.Enabled() { + defaultDetectors = append([]Detector{defaultServiceInstanceIDDetector{}}, defaultDetectors...) + } + defaultResource, err = Detect( + context.Background(), + defaultDetectors..., ) if err != nil { otel.Handle(err) diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index 8a89fffdb4a..1d399a75db2 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -381,7 +381,7 @@ func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd R } } -func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) bool { +func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) bool { if !sd.SpanContext().IsSampled() { return false } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go index 69eb2fdfce3..821c83faa1d 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go @@ -3,23 +3,43 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" +import ( + "slices" + "sync" + + "go.opentelemetry.io/otel/internal/global" +) + // evictedQueue is a FIFO queue with a configurable capacity. -type evictedQueue struct { - queue []interface{} +type evictedQueue[T any] struct { + queue []T capacity int droppedCount int + logDropped func() } -func newEvictedQueue(capacity int) evictedQueue { +func newEvictedQueueEvent(capacity int) evictedQueue[Event] { // Do not pre-allocate queue, do this lazily. - return evictedQueue{capacity: capacity} + return evictedQueue[Event]{ + capacity: capacity, + logDropped: sync.OnceFunc(func() { global.Warn("limit reached: dropping trace trace.Event") }), + } +} + +func newEvictedQueueLink(capacity int) evictedQueue[Link] { + // Do not pre-allocate queue, do this lazily. + return evictedQueue[Link]{ + capacity: capacity, + logDropped: sync.OnceFunc(func() { global.Warn("limit reached: dropping trace trace.Link") }), + } } // add adds value to the evictedQueue eq. If eq is at capacity, the oldest // queued value will be discarded and the drop count incremented. 
-func (eq *evictedQueue) add(value interface{}) { +func (eq *evictedQueue[T]) add(value T) { if eq.capacity == 0 { eq.droppedCount++ + eq.logDropped() return } @@ -28,6 +48,12 @@ func (eq *evictedQueue) add(value interface{}) { copy(eq.queue[:eq.capacity-1], eq.queue[1:]) eq.queue = eq.queue[:eq.capacity-1] eq.droppedCount++ + eq.logDropped() } eq.queue = append(eq.queue, value) } + +// copy returns a copy of the evictedQueue. +func (eq *evictedQueue[T]) copy() []T { + return slices.Clone(eq.queue) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go index f9633d8c576..925bcf99305 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go @@ -41,7 +41,12 @@ func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.Trace gen.Lock() defer gen.Unlock() sid := trace.SpanID{} - _, _ = gen.randSource.Read(sid[:]) + for { + _, _ = gen.randSource.Read(sid[:]) + if sid.IsValid() { + break + } + } return sid } @@ -51,9 +56,19 @@ func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace. gen.Lock() defer gen.Unlock() tid := trace.TraceID{} - _, _ = gen.randSource.Read(tid[:]) sid := trace.SpanID{} - _, _ = gen.randSource.Read(sid[:]) + for { + _, _ = gen.randSource.Read(tid[:]) + if tid.IsValid() { + break + } + } + for { + _, _ = gen.randSource.Read(sid[:]) + if sid.IsValid() { + break + } + } return tid, sid } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go index dec237ca731..14c2e5bebda 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -291,7 +291,7 @@ func (p *TracerProvider) Shutdown(ctx context.Context) error { retErr = err } else { // Poor man's list of errors - retErr = fmt.Errorf("%v; %v", retErr, err) + retErr = fmt.Errorf("%w; %w", retErr, err) } } } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index c44f6b926aa..ac90f1a2600 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -17,10 +17,10 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/instrumentation" - "go.opentelemetry.io/otel/sdk/internal" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) @@ -137,12 +137,13 @@ type recordingSpan struct { // ReadOnlySpan exported when the span ends. attributes []attribute.KeyValue droppedAttributes int + logDropAttrsOnce sync.Once // events are stored in FIFO queue capped by configured limit. - events evictedQueue + events evictedQueue[Event] // links are stored in FIFO queue capped by configured limit. - links evictedQueue + links evictedQueue[Link] // executionTracerTaskEnd ends the execution tracer span. executionTracerTaskEnd func() @@ -219,7 +220,7 @@ func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) { limit := s.tracer.provider.spanLimits.AttributeCountLimit if limit == 0 { // No attributes allowed. 
- s.droppedAttributes += len(attributes) + s.addDroppedAttr(len(attributes)) return } @@ -236,7 +237,7 @@ func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) { for _, a := range attributes { if !a.Valid() { // Drop all invalid attributes. - s.droppedAttributes++ + s.addDroppedAttr(1) continue } a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a) @@ -244,6 +245,22 @@ func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) { } } +// Declared as a var so tests can override. +var logDropAttrs = func() { + global.Warn("limit reached: dropping trace Span attributes") +} + +// addDroppedAttr adds incr to the count of dropped attributes. +// +// The first, and only the first, time this method is called a warning will be +// logged. +// +// This method assumes s.mu.Lock is held by the caller. +func (s *recordingSpan) addDroppedAttr(incr int) { + s.droppedAttributes += incr + s.logDropAttrsOnce.Do(logDropAttrs) +} + // addOverCapAttrs adds the attributes attrs to the span s while // de-duplicating the attributes of s and attrs and dropping attributes that // exceed the limit. @@ -273,7 +290,7 @@ func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) { for _, a := range attrs { if !a.Valid() { // Drop all invalid attributes. - s.droppedAttributes++ + s.addDroppedAttr(1) continue } @@ -286,7 +303,7 @@ func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) { if len(s.attributes) >= limit { // Do not just drop all of the remaining attributes, make sure // updates are checked and performed. - s.droppedAttributes++ + s.addDroppedAttr(1) } else { a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a) s.attributes = append(s.attributes, a) @@ -367,7 +384,7 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) { // Store the end time as soon as possible to avoid artificially increasing // the span's duration in case some operation below takes a while. - et := internal.MonotonicEndTime(s.startTime) + et := monotonicEndTime(s.startTime) // Do relative expensive check now that we have an end time and see if we // need to do any more processing. @@ -418,6 +435,16 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) { } } +// monotonicEndTime returns the end time at present but offset from start, +// monotonically. +// +// The monotonic clock is used in subtractions hence the duration since start +// added back to start gives end as a monotonic time. See +// https://golang.org/pkg/time/#hdr-Monotonic_Clocks +func monotonicEndTime(start time.Time) time.Time { + return start.Add(time.Since(start)) +} + // RecordError will record err as a span event for this span. An additional call to // SetStatus is required if the Status of the Span should be set to Error, this method // does not change the Span status. If this span is not being recorded or err is nil @@ -585,7 +612,7 @@ func (s *recordingSpan) Links() []Link { if len(s.links.queue) == 0 { return []Link{} } - return s.interfaceArrayToLinksArray() + return s.links.copy() } // Events returns the events of this span. @@ -595,7 +622,7 @@ func (s *recordingSpan) Events() []Event { if len(s.events.queue) == 0 { return []Event{} } - return s.interfaceArrayToEventArray() + return s.events.copy() } // Status returns the status of this span. 
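The span.go and evictedqueue.go hunks above route dropped attributes, events, and links through a warn-once logger: the drop counters still increment on every drop, but a sync.Once (for span attributes) or sync.OnceFunc (for the event/link queues) ensures the warning is emitted only a single time. A standalone sketch of that pattern, using fmt in place of the SDK's internal global logger:

package main

import (
	"fmt"
	"sync"
)

func main() {
	// Wrap the warning in sync.OnceFunc (Go 1.21+) so repeated drops only
	// log once, mirroring logDropAttrs / logDropped in the patch above.
	warnOnce := sync.OnceFunc(func() {
		fmt.Println("limit reached: dropping trace Span attributes")
	})

	dropped := 0
	for i := 0; i < 5; i++ {
		dropped++  // every drop is still counted
		warnOnce() // the warning fires only on the first drop
	}
	fmt.Println("dropped:", dropped) // prints "dropped: 5"
}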
@@ -630,7 +657,11 @@ func (s *recordingSpan) Resource() *resource.Resource { } func (s *recordingSpan) AddLink(link trace.Link) { - if !s.IsRecording() || !link.SpanContext.IsValid() { + if !s.IsRecording() { + return + } + if !link.SpanContext.IsValid() && len(link.Attributes) == 0 && + link.SpanContext.TraceState().Len() == 0 { return } @@ -713,32 +744,16 @@ func (s *recordingSpan) snapshot() ReadOnlySpan { } sd.droppedAttributeCount = s.droppedAttributes if len(s.events.queue) > 0 { - sd.events = s.interfaceArrayToEventArray() + sd.events = s.events.copy() sd.droppedEventCount = s.events.droppedCount } if len(s.links.queue) > 0 { - sd.links = s.interfaceArrayToLinksArray() + sd.links = s.links.copy() sd.droppedLinkCount = s.links.droppedCount } return &sd } -func (s *recordingSpan) interfaceArrayToLinksArray() []Link { - linkArr := make([]Link, 0) - for _, value := range s.links.queue { - linkArr = append(linkArr, value.(Link)) - } - return linkArr -} - -func (s *recordingSpan) interfaceArrayToEventArray() []Event { - eventArr := make([]Event, 0) - for _, value := range s.events.queue { - eventArr = append(eventArr, value.(Event)) - } - return eventArr -} - func (s *recordingSpan) addChild() { if !s.IsRecording() { return diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go index 3668b1387d0..43419d3b541 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go @@ -132,8 +132,8 @@ func (tr *tracer) newRecordingSpan(psc, sc trace.SpanContext, name string, sr Sa spanKind: trace.ValidateSpanKind(config.SpanKind()), name: name, startTime: startTime, - events: newEvictedQueue(tr.provider.spanLimits.EventCountLimit), - links: newEvictedQueue(tr.provider.spanLimits.LinkCountLimit), + events: newEvictedQueueEvent(tr.provider.spanLimits.EventCountLimit), + links: newEvictedQueueLink(tr.provider.spanLimits.LinkCountLimit), tracer: tr, } diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go index ec425157b6c..33d065a7cb9 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -5,5 +5,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. func Version() string { - return "1.26.0" + return "1.28.0" } diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md deleted file mode 100644 index 0b6cbe960cb..00000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Semconv v1.24.0 - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.24.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.24.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go deleted file mode 100644 index 6e688345cbb..00000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go +++ /dev/null @@ -1,4387 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" - -import "go.opentelemetry.io/otel/attribute" - -// Describes FaaS attributes. 
-const ( - // FaaSInvokedNameKey is the attribute Key conforming to the - // "faas.invoked_name" semantic conventions. It represents the name of the - // invoked function. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'my-function' - // Note: SHOULD be equal to the `faas.name` resource attribute of the - // invoked function. - FaaSInvokedNameKey = attribute.Key("faas.invoked_name") - - // FaaSInvokedProviderKey is the attribute Key conforming to the - // "faas.invoked_provider" semantic conventions. It represents the cloud - // provider of the invoked function. - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - // Note: SHOULD be equal to the `cloud.provider` resource attribute of the - // invoked function. - FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") - - // FaaSInvokedRegionKey is the attribute Key conforming to the - // "faas.invoked_region" semantic conventions. It represents the cloud - // region of the invoked function. - // - // Type: string - // RequirementLevel: ConditionallyRequired (For some cloud providers, like - // AWS or GCP, the region in which a function is hosted is essential to - // uniquely identify the function and also part of its endpoint. Since it's - // part of the endpoint being called, the region is always known to - // clients. In these cases, `faas.invoked_region` MUST be set accordingly. - // If the region is unknown to the client or not required for identifying - // the invoked function, setting `faas.invoked_region` is optional.) - // Stability: experimental - // Examples: 'eu-central-1' - // Note: SHOULD be equal to the `cloud.region` resource attribute of the - // invoked function. - FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") - - // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" - // semantic conventions. It represents the type of the trigger which caused - // this function invocation. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - FaaSTriggerKey = attribute.Key("faas.trigger") -) - -var ( - // Alibaba Cloud - FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") - // Amazon Web Services - FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") - // Microsoft Azure - FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") - // Google Cloud Platform - FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") - // Tencent Cloud - FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") -) - -var ( - // A response to some data source operation such as a database or filesystem read/write - FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") - // To provide an answer to an inbound HTTP request - FaaSTriggerHTTP = FaaSTriggerKey.String("http") - // A function is set to be executed when messages are sent to a messaging system - FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") - // A function is scheduled to be executed regularly - FaaSTriggerTimer = FaaSTriggerKey.String("timer") - // If none of the others apply - FaaSTriggerOther = FaaSTriggerKey.String("other") -) - -// FaaSInvokedName returns an attribute KeyValue conforming to the -// "faas.invoked_name" semantic conventions. It represents the name of the -// invoked function. 
-func FaaSInvokedName(val string) attribute.KeyValue { - return FaaSInvokedNameKey.String(val) -} - -// FaaSInvokedRegion returns an attribute KeyValue conforming to the -// "faas.invoked_region" semantic conventions. It represents the cloud region -// of the invoked function. -func FaaSInvokedRegion(val string) attribute.KeyValue { - return FaaSInvokedRegionKey.String(val) -} - -// Attributes for Events represented using Log Records. -const ( - // EventNameKey is the attribute Key conforming to the "event.name" - // semantic conventions. It represents the identifies the class / type of - // event. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'browser.mouse.click', 'device.app.lifecycle' - // Note: Event names are subject to the same rules as [attribute - // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.26.0/specification/common/attribute-naming.md). - // Notably, event names are namespaced to avoid collisions and provide a - // clean separation of semantics for events in separate domains like - // browser, mobile, and kubernetes. - EventNameKey = attribute.Key("event.name") -) - -// EventName returns an attribute KeyValue conforming to the "event.name" -// semantic conventions. It represents the identifies the class / type of -// event. -func EventName(val string) attribute.KeyValue { - return EventNameKey.String(val) -} - -// The attributes described in this section are rather generic. They may be -// used in any Log Record they apply to. -const ( - // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" - // semantic conventions. It represents a unique identifier for the Log - // Record. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' - // Note: If an id is provided, other log records with the same id will be - // considered duplicates and can be removed safely. This means, that two - // distinguishable log records MUST have different values. - // The id MAY be an [Universally Unique Lexicographically Sortable - // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers - // (e.g. UUID) may be used as needed. - LogRecordUIDKey = attribute.Key("log.record.uid") -) - -// LogRecordUID returns an attribute KeyValue conforming to the -// "log.record.uid" semantic conventions. It represents a unique identifier for -// the Log Record. -func LogRecordUID(val string) attribute.KeyValue { - return LogRecordUIDKey.String(val) -} - -// Describes Log attributes -const ( - // LogIostreamKey is the attribute Key conforming to the "log.iostream" - // semantic conventions. It represents the stream associated with the log. - // See below for a list of well-known values. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - LogIostreamKey = attribute.Key("log.iostream") -) - -var ( - // Logs from stdout stream - LogIostreamStdout = LogIostreamKey.String("stdout") - // Events from stderr stream - LogIostreamStderr = LogIostreamKey.String("stderr") -) - -// A file to which log was emitted. -const ( - // LogFileNameKey is the attribute Key conforming to the "log.file.name" - // semantic conventions. It represents the basename of the file. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: experimental - // Examples: 'audit.log' - LogFileNameKey = attribute.Key("log.file.name") - - // LogFileNameResolvedKey is the attribute Key conforming to the - // "log.file.name_resolved" semantic conventions. It represents the - // basename of the file, with symlinks resolved. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'uuid.log' - LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") - - // LogFilePathKey is the attribute Key conforming to the "log.file.path" - // semantic conventions. It represents the full path to the file. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/var/log/mysql/audit.log' - LogFilePathKey = attribute.Key("log.file.path") - - // LogFilePathResolvedKey is the attribute Key conforming to the - // "log.file.path_resolved" semantic conventions. It represents the full - // path to the file, with symlinks resolved. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/var/lib/docker/uuid.log' - LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") -) - -// LogFileName returns an attribute KeyValue conforming to the -// "log.file.name" semantic conventions. It represents the basename of the -// file. -func LogFileName(val string) attribute.KeyValue { - return LogFileNameKey.String(val) -} - -// LogFileNameResolved returns an attribute KeyValue conforming to the -// "log.file.name_resolved" semantic conventions. It represents the basename of -// the file, with symlinks resolved. -func LogFileNameResolved(val string) attribute.KeyValue { - return LogFileNameResolvedKey.String(val) -} - -// LogFilePath returns an attribute KeyValue conforming to the -// "log.file.path" semantic conventions. It represents the full path to the -// file. -func LogFilePath(val string) attribute.KeyValue { - return LogFilePathKey.String(val) -} - -// LogFilePathResolved returns an attribute KeyValue conforming to the -// "log.file.path_resolved" semantic conventions. It represents the full path -// to the file, with symlinks resolved. -func LogFilePathResolved(val string) attribute.KeyValue { - return LogFilePathResolvedKey.String(val) -} - -// Describes Database attributes -const ( - // PoolNameKey is the attribute Key conforming to the "pool.name" semantic - // conventions. It represents the name of the connection pool; unique - // within the instrumented application. In case the connection pool - // implementation doesn't provide a name, then the - // [db.connection_string](/docs/database/database-spans.md#connection-level-attributes) - // should be used - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'myDataSource' - PoolNameKey = attribute.Key("pool.name") - - // StateKey is the attribute Key conforming to the "state" semantic - // conventions. It represents the state of a connection in the pool - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - // Examples: 'idle' - StateKey = attribute.Key("state") -) - -var ( - // idle - StateIdle = StateKey.String("idle") - // used - StateUsed = StateKey.String("used") -) - -// PoolName returns an attribute KeyValue conforming to the "pool.name" -// semantic conventions. It represents the name of the connection pool; unique -// within the instrumented application. 
In case the connection pool -// implementation doesn't provide a name, then the -// [db.connection_string](/docs/database/database-spans.md#connection-level-attributes) -// should be used -func PoolName(val string) attribute.KeyValue { - return PoolNameKey.String(val) -} - -// ASP.NET Core attributes -const ( - // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to - // the "aspnetcore.diagnostics.handler.type" semantic conventions. It - // represents the full type name of the - // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) - // implementation that handled the exception. - // - // Type: string - // RequirementLevel: ConditionallyRequired (if and only if the exception - // was handled by this handler.) - // Stability: experimental - // Examples: 'Contoso.MyHandler' - AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type") - - // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the - // "aspnetcore.rate_limiting.policy" semantic conventions. It represents - // the rate limiting policy name. - // - // Type: string - // RequirementLevel: ConditionallyRequired (if the matched endpoint for the - // request had a rate-limiting policy.) - // Stability: experimental - // Examples: 'fixed', 'sliding', 'token' - AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy") - - // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the - // "aspnetcore.rate_limiting.result" semantic conventions. It represents - // the rate-limiting result, shows whether the lease was acquired or - // contains a rejection reason - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - // Examples: 'acquired', 'request_canceled' - AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result") - - // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the - // "aspnetcore.request.is_unhandled" semantic conventions. It represents - // the flag indicating if request was handled by the application pipeline. - // - // Type: boolean - // RequirementLevel: ConditionallyRequired (if and only if the request was - // not handled.) - // Stability: experimental - // Examples: True - AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled") - - // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the - // "aspnetcore.routing.is_fallback" semantic conventions. It represents a - // value that indicates whether the matched route is a fallback route. - // - // Type: boolean - // RequirementLevel: ConditionallyRequired (If and only if a route was - // successfully matched.) 
- // Stability: experimental - // Examples: True - AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback") -) - -var ( - // Lease was acquired - AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired") - // Lease request was rejected by the endpoint limiter - AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter") - // Lease request was rejected by the global limiter - AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter") - // Lease request was canceled - AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled") -) - -// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming -// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It -// represents the full type name of the -// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) -// implementation that handled the exception. -func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue { - return AspnetcoreDiagnosticsHandlerTypeKey.String(val) -} - -// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to -// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents -// the rate limiting policy name. -func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue { - return AspnetcoreRateLimitingPolicyKey.String(val) -} - -// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to -// the "aspnetcore.request.is_unhandled" semantic conventions. It represents -// the flag indicating if request was handled by the application pipeline. -func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue { - return AspnetcoreRequestIsUnhandledKey.Bool(val) -} - -// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to -// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a -// value that indicates whether the matched route is a fallback route. -func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue { - return AspnetcoreRoutingIsFallbackKey.Bool(val) -} - -// SignalR attributes -const ( - // SignalrConnectionStatusKey is the attribute Key conforming to the - // "signalr.connection.status" semantic conventions. It represents the - // signalR HTTP connection closure status. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'app_shutdown', 'timeout' - SignalrConnectionStatusKey = attribute.Key("signalr.connection.status") - - // SignalrTransportKey is the attribute Key conforming to the - // "signalr.transport" semantic conventions. 
It represents the [SignalR - // transport - // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md) - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'web_sockets', 'long_polling' - SignalrTransportKey = attribute.Key("signalr.transport") -) - -var ( - // The connection was closed normally - SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure") - // The connection was closed due to a timeout - SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout") - // The connection was closed because the app is shutting down - SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown") -) - -var ( - // ServerSentEvents protocol - SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events") - // LongPolling protocol - SignalrTransportLongPolling = SignalrTransportKey.String("long_polling") - // WebSockets protocol - SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets") -) - -// Describes JVM buffer metric attributes. -const ( - // JvmBufferPoolNameKey is the attribute Key conforming to the - // "jvm.buffer.pool.name" semantic conventions. It represents the name of - // the buffer pool. - // - // Type: string - // RequirementLevel: Recommended - // Stability: experimental - // Examples: 'mapped', 'direct' - // Note: Pool names are generally obtained via - // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()). - JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name") -) - -// JvmBufferPoolName returns an attribute KeyValue conforming to the -// "jvm.buffer.pool.name" semantic conventions. It represents the name of the -// buffer pool. -func JvmBufferPoolName(val string) attribute.KeyValue { - return JvmBufferPoolNameKey.String(val) -} - -// Describes JVM memory metric attributes. -const ( - // JvmMemoryPoolNameKey is the attribute Key conforming to the - // "jvm.memory.pool.name" semantic conventions. It represents the name of - // the memory pool. - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' - // Note: Pool names are generally obtained via - // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). - JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name") - - // JvmMemoryTypeKey is the attribute Key conforming to the - // "jvm.memory.type" semantic conventions. It represents the type of - // memory. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'heap', 'non_heap' - JvmMemoryTypeKey = attribute.Key("jvm.memory.type") -) - -var ( - // Heap memory - JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap") - // Non-heap memory - JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap") -) - -// JvmMemoryPoolName returns an attribute KeyValue conforming to the -// "jvm.memory.pool.name" semantic conventions. It represents the name of the -// memory pool. -func JvmMemoryPoolName(val string) attribute.KeyValue { - return JvmMemoryPoolNameKey.String(val) -} - -// Describes System metric attributes -const ( - // SystemDeviceKey is the attribute Key conforming to the "system.device" - // semantic conventions. 
It represents the device identifier - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '(identifier)' - SystemDeviceKey = attribute.Key("system.device") -) - -// SystemDevice returns an attribute KeyValue conforming to the -// "system.device" semantic conventions. It represents the device identifier -func SystemDevice(val string) attribute.KeyValue { - return SystemDeviceKey.String(val) -} - -// Describes System CPU metric attributes -const ( - // SystemCPULogicalNumberKey is the attribute Key conforming to the - // "system.cpu.logical_number" semantic conventions. It represents the - // logical CPU number [0..n-1] - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1 - SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") - - // SystemCPUStateKey is the attribute Key conforming to the - // "system.cpu.state" semantic conventions. It represents the state of the - // CPU - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'idle', 'interrupt' - SystemCPUStateKey = attribute.Key("system.cpu.state") -) - -var ( - // user - SystemCPUStateUser = SystemCPUStateKey.String("user") - // system - SystemCPUStateSystem = SystemCPUStateKey.String("system") - // nice - SystemCPUStateNice = SystemCPUStateKey.String("nice") - // idle - SystemCPUStateIdle = SystemCPUStateKey.String("idle") - // iowait - SystemCPUStateIowait = SystemCPUStateKey.String("iowait") - // interrupt - SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt") - // steal - SystemCPUStateSteal = SystemCPUStateKey.String("steal") -) - -// SystemCPULogicalNumber returns an attribute KeyValue conforming to the -// "system.cpu.logical_number" semantic conventions. It represents the logical -// CPU number [0..n-1] -func SystemCPULogicalNumber(val int) attribute.KeyValue { - return SystemCPULogicalNumberKey.Int(val) -} - -// Describes System Memory metric attributes -const ( - // SystemMemoryStateKey is the attribute Key conforming to the - // "system.memory.state" semantic conventions. It represents the memory - // state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'free', 'cached' - SystemMemoryStateKey = attribute.Key("system.memory.state") -) - -var ( - // used - SystemMemoryStateUsed = SystemMemoryStateKey.String("used") - // free - SystemMemoryStateFree = SystemMemoryStateKey.String("free") - // shared - SystemMemoryStateShared = SystemMemoryStateKey.String("shared") - // buffers - SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") - // cached - SystemMemoryStateCached = SystemMemoryStateKey.String("cached") -) - -// Describes System Memory Paging metric attributes -const ( - // SystemPagingDirectionKey is the attribute Key conforming to the - // "system.paging.direction" semantic conventions. It represents the paging - // access direction - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'in' - SystemPagingDirectionKey = attribute.Key("system.paging.direction") - - // SystemPagingStateKey is the attribute Key conforming to the - // "system.paging.state" semantic conventions. 
It represents the memory - // paging state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'free' - SystemPagingStateKey = attribute.Key("system.paging.state") - - // SystemPagingTypeKey is the attribute Key conforming to the - // "system.paging.type" semantic conventions. It represents the memory - // paging type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'minor' - SystemPagingTypeKey = attribute.Key("system.paging.type") -) - -var ( - // in - SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") - // out - SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") -) - -var ( - // used - SystemPagingStateUsed = SystemPagingStateKey.String("used") - // free - SystemPagingStateFree = SystemPagingStateKey.String("free") -) - -var ( - // major - SystemPagingTypeMajor = SystemPagingTypeKey.String("major") - // minor - SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") -) - -// Describes Filesystem metric attributes -const ( - // SystemFilesystemModeKey is the attribute Key conforming to the - // "system.filesystem.mode" semantic conventions. It represents the - // filesystem mode - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'rw, ro' - SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") - - // SystemFilesystemMountpointKey is the attribute Key conforming to the - // "system.filesystem.mountpoint" semantic conventions. It represents the - // filesystem mount path - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/mnt/data' - SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") - - // SystemFilesystemStateKey is the attribute Key conforming to the - // "system.filesystem.state" semantic conventions. It represents the - // filesystem state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'used' - SystemFilesystemStateKey = attribute.Key("system.filesystem.state") - - // SystemFilesystemTypeKey is the attribute Key conforming to the - // "system.filesystem.type" semantic conventions. It represents the - // filesystem type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ext4' - SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") -) - -var ( - // used - SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") - // free - SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") - // reserved - SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") -) - -var ( - // fat32 - SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") - // exfat - SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") - // ntfs - SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") - // refs - SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") - // hfsplus - SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") - // ext4 - SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") -) - -// SystemFilesystemMode returns an attribute KeyValue conforming to the -// "system.filesystem.mode" semantic conventions. 
It represents the filesystem -// mode -func SystemFilesystemMode(val string) attribute.KeyValue { - return SystemFilesystemModeKey.String(val) -} - -// SystemFilesystemMountpoint returns an attribute KeyValue conforming to -// the "system.filesystem.mountpoint" semantic conventions. It represents the -// filesystem mount path -func SystemFilesystemMountpoint(val string) attribute.KeyValue { - return SystemFilesystemMountpointKey.String(val) -} - -// Describes Network metric attributes -const ( - // SystemNetworkStateKey is the attribute Key conforming to the - // "system.network.state" semantic conventions. It represents a stateless - // protocol MUST NOT set this attribute - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'close_wait' - SystemNetworkStateKey = attribute.Key("system.network.state") -) - -var ( - // close - SystemNetworkStateClose = SystemNetworkStateKey.String("close") - // close_wait - SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait") - // closing - SystemNetworkStateClosing = SystemNetworkStateKey.String("closing") - // delete - SystemNetworkStateDelete = SystemNetworkStateKey.String("delete") - // established - SystemNetworkStateEstablished = SystemNetworkStateKey.String("established") - // fin_wait_1 - SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1") - // fin_wait_2 - SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2") - // last_ack - SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack") - // listen - SystemNetworkStateListen = SystemNetworkStateKey.String("listen") - // syn_recv - SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv") - // syn_sent - SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent") - // time_wait - SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait") -) - -// Describes System Process metric attributes -const ( - // SystemProcessesStatusKey is the attribute Key conforming to the - // "system.processes.status" semantic conventions. It represents the - // process state, e.g., [Linux Process State - // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES) - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'running' - SystemProcessesStatusKey = attribute.Key("system.processes.status") -) - -var ( - // running - SystemProcessesStatusRunning = SystemProcessesStatusKey.String("running") - // sleeping - SystemProcessesStatusSleeping = SystemProcessesStatusKey.String("sleeping") - // stopped - SystemProcessesStatusStopped = SystemProcessesStatusKey.String("stopped") - // defunct - SystemProcessesStatusDefunct = SystemProcessesStatusKey.String("defunct") -) - -// These attributes may be used to describe the client in a connection-based -// network interaction where there is one side that initiates the connection -// (the client is the side that initiates the connection). This covers all TCP -// network interactions since TCP is connection-based and one side initiates -// the connection (an exception is made for peer-to-peer communication over TCP -// where the "user-facing" surface of the protocol / API doesn't expose a clear -// notion of client and server). This also covers UDP network interactions -// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. -const ( - // ClientAddressKey is the attribute Key conforming to the "client.address" - // semantic conventions. 
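The system.* keys above are normally attached to metric data points rather than spans. Below is a small, self-contained sketch of assembling such labels into an attribute.Set (the deduplicated, sorted form the metrics SDK works with); the key strings are written out literally and the values are invented for illustration.

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// Hypothetical labels for a per-CPU utilization data point, following the
	// system.cpu.* conventions documented above.
	set := attribute.NewSet(
		attribute.Key("system.cpu.state").String("idle"),
		attribute.Key("system.cpu.logical_number").Int(3),
	)
	// attribute.Set deduplicates and sorts by key, so the encoded form is stable.
	fmt.Println(set.Len(), set.Encoded(attribute.DefaultEncoder()))
}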
It represents the client address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix - // domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the server side, and when communicating through - // an intermediary, `client.address` SHOULD represent the client address - // behind any intermediaries, for example proxies, if it's available. - ClientAddressKey = attribute.Key("client.address") - - // ClientPortKey is the attribute Key conforming to the "client.port" - // semantic conventions. It represents the client port number. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - // Note: When observed from the server side, and when communicating through - // an intermediary, `client.port` SHOULD represent the client port behind - // any intermediaries, for example proxies, if it's available. - ClientPortKey = attribute.Key("client.port") -) - -// ClientAddress returns an attribute KeyValue conforming to the -// "client.address" semantic conventions. It represents the client address - -// domain name if available without reverse DNS lookup; otherwise, IP address -// or Unix domain socket name. -func ClientAddress(val string) attribute.KeyValue { - return ClientAddressKey.String(val) -} - -// ClientPort returns an attribute KeyValue conforming to the "client.port" -// semantic conventions. It represents the client port number. -func ClientPort(val int) attribute.KeyValue { - return ClientPortKey.Int(val) -} - -// The attributes used to describe telemetry in the context of databases. -const ( - // DBCassandraConsistencyLevelKey is the attribute Key conforming to the - // "db.cassandra.consistency_level" semantic conventions. It represents the - // consistency level of the query. Based on consistency values from - // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") - - // DBCassandraCoordinatorDCKey is the attribute Key conforming to the - // "db.cassandra.coordinator.dc" semantic conventions. It represents the - // data center of the coordinating node for a query. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-west-2' - DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") - - // DBCassandraCoordinatorIDKey is the attribute Key conforming to the - // "db.cassandra.coordinator.id" semantic conventions. It represents the ID - // of the coordinating node for a query. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' - DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") - - // DBCassandraIdempotenceKey is the attribute Key conforming to the - // "db.cassandra.idempotence" semantic conventions. It represents the - // whether or not the query is idempotent. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") - - // DBCassandraPageSizeKey is the attribute Key conforming to the - // "db.cassandra.page_size" semantic conventions. It represents the fetch - // size used for paging, i.e. 
how many rows will be returned at once. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 5000 - DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") - - // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming - // to the "db.cassandra.speculative_execution_count" semantic conventions. - // It represents the number of times a query was speculatively executed. - // Not set or `0` if the query was not executed speculatively. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 2 - DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") - - // DBCassandraTableKey is the attribute Key conforming to the - // "db.cassandra.table" semantic conventions. It represents the name of the - // primary Cassandra table that the operation is acting upon, including the - // keyspace name (if applicable). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mytable' - // Note: This mirrors the db.sql.table attribute but references cassandra - // rather than sql. It is not recommended to attempt any client-side - // parsing of `db.statement` just to get this property, but it should be - // set if it is provided by the library being instrumented. If the - // operation is acting upon an anonymous table, or more than one table, - // this value MUST NOT be set. - DBCassandraTableKey = attribute.Key("db.cassandra.table") - - // DBConnectionStringKey is the attribute Key conforming to the - // "db.connection_string" semantic conventions. It represents the - // connection string used to connect to the database. It is recommended to - // remove embedded credentials. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' - DBConnectionStringKey = attribute.Key("db.connection_string") - - // DBCosmosDBClientIDKey is the attribute Key conforming to the - // "db.cosmosdb.client_id" semantic conventions. It represents the unique - // Cosmos client instance id. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' - DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") - - // DBCosmosDBConnectionModeKey is the attribute Key conforming to the - // "db.cosmosdb.connection_mode" semantic conventions. It represents the - // cosmos client connection mode. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") - - // DBCosmosDBContainerKey is the attribute Key conforming to the - // "db.cosmosdb.container" semantic conventions. It represents the cosmos - // DB container name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'anystring' - DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container") - - // DBCosmosDBOperationTypeKey is the attribute Key conforming to the - // "db.cosmosdb.operation_type" semantic conventions. It represents the - // cosmosDB Operation Type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") - - // DBCosmosDBRequestChargeKey is the attribute Key conforming to the - // "db.cosmosdb.request_charge" semantic conventions. 
It represents the rU - // consumed for that operation - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 46.18, 1.0 - DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") - - // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the - // "db.cosmosdb.request_content_length" semantic conventions. It represents - // the request payload size in bytes - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") - - // DBCosmosDBStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos - // DB status code. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 200, 201 - DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") - - // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.sub_status_code" semantic conventions. It represents the - // cosmos DB sub status code. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1000, 1002 - DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") - - // DBElasticsearchClusterNameKey is the attribute Key conforming to the - // "db.elasticsearch.cluster.name" semantic conventions. It represents the - // represents the identifier of an Elasticsearch cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f' - DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name") - - // DBElasticsearchNodeNameKey is the attribute Key conforming to the - // "db.elasticsearch.node.name" semantic conventions. It represents the - // represents the human-readable identifier of the node/instance to which a - // request was routed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'instance-0000000001' - DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name") - - // DBInstanceIDKey is the attribute Key conforming to the "db.instance.id" - // semantic conventions. It represents an identifier (address, unique name, - // or any other identifier) of the database instance that is executing - // queries or mutations on the current connection. This is useful in cases - // where the database is running in a clustered environment and the - // instrumentation is able to record the node executing the query. The - // client may obtain this value in databases like MySQL using queries like - // `select @@hostname`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mysql-e26b99z.example.com' - DBInstanceIDKey = attribute.Key("db.instance.id") - - // DBJDBCDriverClassnameKey is the attribute Key conforming to the - // "db.jdbc.driver_classname" semantic conventions. It represents the - // fully-qualified class name of the [Java Database Connectivity - // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) - // driver used to connect. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'org.postgresql.Driver', - // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' - DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") - - // DBMongoDBCollectionKey is the attribute Key conforming to the - // "db.mongodb.collection" semantic conventions. It represents the MongoDB - // collection being accessed within the database stated in `db.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'customers', 'products' - DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") - - // DBMSSQLInstanceNameKey is the attribute Key conforming to the - // "db.mssql.instance_name" semantic conventions. It represents the - // Microsoft SQL Server [instance - // name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) - // connecting to. This name is used to determine the port of a named - // instance. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MSSQLSERVER' - // Note: If setting a `db.mssql.instance_name`, `server.port` is no longer - // required (but still recommended if non-standard). - DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") - - // DBNameKey is the attribute Key conforming to the "db.name" semantic - // conventions. It represents the this attribute is used to report the name - // of the database being accessed. For commands that switch the database, - // this should be set to the target database (even if the command fails). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'customers', 'main' - // Note: In some SQL databases, the database name to be used is called - // "schema name". In case there are multiple layers that could be - // considered for database name (e.g. Oracle instance name and schema - // name), the database name to be used is the more specific layer (e.g. - // Oracle schema name). - DBNameKey = attribute.Key("db.name") - - // DBOperationKey is the attribute Key conforming to the "db.operation" - // semantic conventions. It represents the name of the operation being - // executed, e.g. the [MongoDB command - // name](https://docs.mongodb.com/manual/reference/command/#database-operations) - // such as `findAndModify`, or the SQL keyword. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'findAndModify', 'HMSET', 'SELECT' - // Note: When setting this to an SQL keyword, it is not recommended to - // attempt any client-side parsing of `db.statement` just to get this - // property, but it should be set if the operation name is provided by the - // library being instrumented. If the SQL statement has an ambiguous - // operation, or performs more than one operation, this value may be - // omitted. - DBOperationKey = attribute.Key("db.operation") - - // DBRedisDBIndexKey is the attribute Key conforming to the - // "db.redis.database_index" semantic conventions. It represents the index - // of the database being accessed as used in the [`SELECT` - // command](https://redis.io/commands/select), provided as an integer. To - // be used instead of the generic `db.name` attribute. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 1, 15 - DBRedisDBIndexKey = attribute.Key("db.redis.database_index") - - // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" - // semantic conventions. It represents the name of the primary table that - // the operation is acting upon, including the database name (if - // applicable). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'public.users', 'customers' - // Note: It is not recommended to attempt any client-side parsing of - // `db.statement` just to get this property, but it should be set if it is - // provided by the library being instrumented. If the operation is acting - // upon an anonymous table, or more than one table, this value MUST NOT be - // set. - DBSQLTableKey = attribute.Key("db.sql.table") - - // DBStatementKey is the attribute Key conforming to the "db.statement" - // semantic conventions. It represents the database statement being - // executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' - DBStatementKey = attribute.Key("db.statement") - - // DBSystemKey is the attribute Key conforming to the "db.system" semantic - // conventions. It represents an identifier for the database management - // system (DBMS) product being used. See below for a list of well-known - // identifiers. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBSystemKey = attribute.Key("db.system") - - // DBUserKey is the attribute Key conforming to the "db.user" semantic - // conventions. It represents the username for accessing the database. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'readonly_user', 'reporting_user' - DBUserKey = attribute.Key("db.user") -) - -var ( - // all - DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") - // each_quorum - DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") - // quorum - DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") - // local_quorum - DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") - // one - DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") - // two - DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") - // three - DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") - // local_one - DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") - // any - DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") - // serial - DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") - // local_serial - DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") -) - -var ( - // Gateway (HTTP) connections mode - DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") - // Direct connection - DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") -) - -var ( - // invalid - DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") - // create - DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") - // patch - DBCosmosDBOperationTypePatch = 
DBCosmosDBOperationTypeKey.String("Patch") - // read - DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") - // read_feed - DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") - // delete - DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") - // replace - DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") - // execute - DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") - // query - DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") - // head - DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") - // head_feed - DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") - // upsert - DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") - // batch - DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") - // query_plan - DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") - // execute_javascript - DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") -) - -var ( - // Some other SQL database. Fallback only. See notes - DBSystemOtherSQL = DBSystemKey.String("other_sql") - // Microsoft SQL Server - DBSystemMSSQL = DBSystemKey.String("mssql") - // Microsoft SQL Server Compact - DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") - // MySQL - DBSystemMySQL = DBSystemKey.String("mysql") - // Oracle Database - DBSystemOracle = DBSystemKey.String("oracle") - // IBM DB2 - DBSystemDB2 = DBSystemKey.String("db2") - // PostgreSQL - DBSystemPostgreSQL = DBSystemKey.String("postgresql") - // Amazon Redshift - DBSystemRedshift = DBSystemKey.String("redshift") - // Apache Hive - DBSystemHive = DBSystemKey.String("hive") - // Cloudscape - DBSystemCloudscape = DBSystemKey.String("cloudscape") - // HyperSQL DataBase - DBSystemHSQLDB = DBSystemKey.String("hsqldb") - // Progress Database - DBSystemProgress = DBSystemKey.String("progress") - // SAP MaxDB - DBSystemMaxDB = DBSystemKey.String("maxdb") - // SAP HANA - DBSystemHanaDB = DBSystemKey.String("hanadb") - // Ingres - DBSystemIngres = DBSystemKey.String("ingres") - // FirstSQL - DBSystemFirstSQL = DBSystemKey.String("firstsql") - // EnterpriseDB - DBSystemEDB = DBSystemKey.String("edb") - // InterSystems Caché - DBSystemCache = DBSystemKey.String("cache") - // Adabas (Adaptable Database System) - DBSystemAdabas = DBSystemKey.String("adabas") - // Firebird - DBSystemFirebird = DBSystemKey.String("firebird") - // Apache Derby - DBSystemDerby = DBSystemKey.String("derby") - // FileMaker - DBSystemFilemaker = DBSystemKey.String("filemaker") - // Informix - DBSystemInformix = DBSystemKey.String("informix") - // InstantDB - DBSystemInstantDB = DBSystemKey.String("instantdb") - // InterBase - DBSystemInterbase = DBSystemKey.String("interbase") - // MariaDB - DBSystemMariaDB = DBSystemKey.String("mariadb") - // Netezza - DBSystemNetezza = DBSystemKey.String("netezza") - // Pervasive PSQL - DBSystemPervasive = DBSystemKey.String("pervasive") - // PointBase - DBSystemPointbase = DBSystemKey.String("pointbase") - // SQLite - DBSystemSqlite = DBSystemKey.String("sqlite") - // Sybase - DBSystemSybase = DBSystemKey.String("sybase") - // Teradata - DBSystemTeradata = DBSystemKey.String("teradata") - // Vertica - DBSystemVertica = DBSystemKey.String("vertica") - // H2 - DBSystemH2 = DBSystemKey.String("h2") - // ColdFusion IMQ - DBSystemColdfusion = 
DBSystemKey.String("coldfusion") - // Apache Cassandra - DBSystemCassandra = DBSystemKey.String("cassandra") - // Apache HBase - DBSystemHBase = DBSystemKey.String("hbase") - // MongoDB - DBSystemMongoDB = DBSystemKey.String("mongodb") - // Redis - DBSystemRedis = DBSystemKey.String("redis") - // Couchbase - DBSystemCouchbase = DBSystemKey.String("couchbase") - // CouchDB - DBSystemCouchDB = DBSystemKey.String("couchdb") - // Microsoft Azure Cosmos DB - DBSystemCosmosDB = DBSystemKey.String("cosmosdb") - // Amazon DynamoDB - DBSystemDynamoDB = DBSystemKey.String("dynamodb") - // Neo4j - DBSystemNeo4j = DBSystemKey.String("neo4j") - // Apache Geode - DBSystemGeode = DBSystemKey.String("geode") - // Elasticsearch - DBSystemElasticsearch = DBSystemKey.String("elasticsearch") - // Memcached - DBSystemMemcached = DBSystemKey.String("memcached") - // CockroachDB - DBSystemCockroachdb = DBSystemKey.String("cockroachdb") - // OpenSearch - DBSystemOpensearch = DBSystemKey.String("opensearch") - // ClickHouse - DBSystemClickhouse = DBSystemKey.String("clickhouse") - // Cloud Spanner - DBSystemSpanner = DBSystemKey.String("spanner") - // Trino - DBSystemTrino = DBSystemKey.String("trino") -) - -// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the -// "db.cassandra.coordinator.dc" semantic conventions. It represents the data -// center of the coordinating node for a query. -func DBCassandraCoordinatorDC(val string) attribute.KeyValue { - return DBCassandraCoordinatorDCKey.String(val) -} - -// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the -// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of -// the coordinating node for a query. -func DBCassandraCoordinatorID(val string) attribute.KeyValue { - return DBCassandraCoordinatorIDKey.String(val) -} - -// DBCassandraIdempotence returns an attribute KeyValue conforming to the -// "db.cassandra.idempotence" semantic conventions. It represents the whether -// or not the query is idempotent. -func DBCassandraIdempotence(val bool) attribute.KeyValue { - return DBCassandraIdempotenceKey.Bool(val) -} - -// DBCassandraPageSize returns an attribute KeyValue conforming to the -// "db.cassandra.page_size" semantic conventions. It represents the fetch size -// used for paging, i.e. how many rows will be returned at once. -func DBCassandraPageSize(val int) attribute.KeyValue { - return DBCassandraPageSizeKey.Int(val) -} - -// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue -// conforming to the "db.cassandra.speculative_execution_count" semantic -// conventions. It represents the number of times a query was speculatively -// executed. Not set or `0` if the query was not executed speculatively. -func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { - return DBCassandraSpeculativeExecutionCountKey.Int(val) -} - -// DBCassandraTable returns an attribute KeyValue conforming to the -// "db.cassandra.table" semantic conventions. It represents the name of the -// primary Cassandra table that the operation is acting upon, including the -// keyspace name (if applicable). -func DBCassandraTable(val string) attribute.KeyValue { - return DBCassandraTableKey.String(val) -} - -// DBConnectionString returns an attribute KeyValue conforming to the -// "db.connection_string" semantic conventions. It represents the connection -// string used to connect to the database. It is recommended to remove embedded -// credentials. 
-func DBConnectionString(val string) attribute.KeyValue { - return DBConnectionStringKey.String(val) -} - -// DBCosmosDBClientID returns an attribute KeyValue conforming to the -// "db.cosmosdb.client_id" semantic conventions. It represents the unique -// Cosmos client instance id. -func DBCosmosDBClientID(val string) attribute.KeyValue { - return DBCosmosDBClientIDKey.String(val) -} - -// DBCosmosDBContainer returns an attribute KeyValue conforming to the -// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB -// container name. -func DBCosmosDBContainer(val string) attribute.KeyValue { - return DBCosmosDBContainerKey.String(val) -} - -// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the -// "db.cosmosdb.request_charge" semantic conventions. It represents the rU -// consumed for that operation -func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { - return DBCosmosDBRequestChargeKey.Float64(val) -} - -// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming -// to the "db.cosmosdb.request_content_length" semantic conventions. It -// represents the request payload size in bytes -func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { - return DBCosmosDBRequestContentLengthKey.Int(val) -} - -// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the -// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB -// status code. -func DBCosmosDBStatusCode(val int) attribute.KeyValue { - return DBCosmosDBStatusCodeKey.Int(val) -} - -// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the -// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos -// DB sub status code. -func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { - return DBCosmosDBSubStatusCodeKey.Int(val) -} - -// DBElasticsearchClusterName returns an attribute KeyValue conforming to -// the "db.elasticsearch.cluster.name" semantic conventions. It represents the -// represents the identifier of an Elasticsearch cluster. -func DBElasticsearchClusterName(val string) attribute.KeyValue { - return DBElasticsearchClusterNameKey.String(val) -} - -// DBElasticsearchNodeName returns an attribute KeyValue conforming to the -// "db.elasticsearch.node.name" semantic conventions. It represents the -// represents the human-readable identifier of the node/instance to which a -// request was routed. -func DBElasticsearchNodeName(val string) attribute.KeyValue { - return DBElasticsearchNodeNameKey.String(val) -} - -// DBInstanceID returns an attribute KeyValue conforming to the -// "db.instance.id" semantic conventions. It represents an identifier (address, -// unique name, or any other identifier) of the database instance that is -// executing queries or mutations on the current connection. This is useful in -// cases where the database is running in a clustered environment and the -// instrumentation is able to record the node executing the query. The client -// may obtain this value in databases like MySQL using queries like `select -// @@hostname`. -func DBInstanceID(val string) attribute.KeyValue { - return DBInstanceIDKey.String(val) -} - -// DBJDBCDriverClassname returns an attribute KeyValue conforming to the -// "db.jdbc.driver_classname" semantic conventions. It represents the -// fully-qualified class name of the [Java Database Connectivity -// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver -// used to connect. 
-func DBJDBCDriverClassname(val string) attribute.KeyValue { - return DBJDBCDriverClassnameKey.String(val) -} - -// DBMongoDBCollection returns an attribute KeyValue conforming to the -// "db.mongodb.collection" semantic conventions. It represents the MongoDB -// collection being accessed within the database stated in `db.name`. -func DBMongoDBCollection(val string) attribute.KeyValue { - return DBMongoDBCollectionKey.String(val) -} - -// DBMSSQLInstanceName returns an attribute KeyValue conforming to the -// "db.mssql.instance_name" semantic conventions. It represents the Microsoft -// SQL Server [instance -// name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) -// connecting to. This name is used to determine the port of a named instance. -func DBMSSQLInstanceName(val string) attribute.KeyValue { - return DBMSSQLInstanceNameKey.String(val) -} - -// DBName returns an attribute KeyValue conforming to the "db.name" semantic -// conventions. It represents the this attribute is used to report the name of -// the database being accessed. For commands that switch the database, this -// should be set to the target database (even if the command fails). -func DBName(val string) attribute.KeyValue { - return DBNameKey.String(val) -} - -// DBOperation returns an attribute KeyValue conforming to the -// "db.operation" semantic conventions. It represents the name of the operation -// being executed, e.g. the [MongoDB command -// name](https://docs.mongodb.com/manual/reference/command/#database-operations) -// such as `findAndModify`, or the SQL keyword. -func DBOperation(val string) attribute.KeyValue { - return DBOperationKey.String(val) -} - -// DBRedisDBIndex returns an attribute KeyValue conforming to the -// "db.redis.database_index" semantic conventions. It represents the index of -// the database being accessed as used in the [`SELECT` -// command](https://redis.io/commands/select), provided as an integer. To be -// used instead of the generic `db.name` attribute. -func DBRedisDBIndex(val int) attribute.KeyValue { - return DBRedisDBIndexKey.Int(val) -} - -// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" -// semantic conventions. It represents the name of the primary table that the -// operation is acting upon, including the database name (if applicable). -func DBSQLTable(val string) attribute.KeyValue { - return DBSQLTableKey.String(val) -} - -// DBStatement returns an attribute KeyValue conforming to the -// "db.statement" semantic conventions. It represents the database statement -// being executed. -func DBStatement(val string) attribute.KeyValue { - return DBStatementKey.String(val) -} - -// DBUser returns an attribute KeyValue conforming to the "db.user" semantic -// conventions. It represents the username for accessing the database. -func DBUser(val string) attribute.KeyValue { - return DBUserKey.String(val) -} - -// Describes deprecated HTTP attributes. -const ( - // HTTPFlavorKey is the attribute Key conforming to the "http.flavor" - // semantic conventions. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: deprecated - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorKey = attribute.Key("http.flavor") - - // HTTPMethodKey is the attribute Key conforming to the "http.method" - // semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'GET', 'POST', 'HEAD' - // Deprecated: use `http.request.method` instead. 
- HTTPMethodKey = attribute.Key("http.method") - - // HTTPRequestContentLengthKey is the attribute Key conforming to the - // "http.request_content_length" semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 3495 - // Deprecated: use `http.request.header.content-length` instead. - HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") - - // HTTPResponseContentLengthKey is the attribute Key conforming to the - // "http.response_content_length" semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 3495 - // Deprecated: use `http.response.header.content-length` instead. - HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") - - // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" - // semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'http', 'https' - // Deprecated: use `url.scheme` instead. - HTTPSchemeKey = attribute.Key("http.scheme") - - // HTTPStatusCodeKey is the attribute Key conforming to the - // "http.status_code" semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 200 - // Deprecated: use `http.response.status_code` instead. - HTTPStatusCodeKey = attribute.Key("http.status_code") - - // HTTPTargetKey is the attribute Key conforming to the "http.target" - // semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '/search?q=OpenTelemetry#SemConv' - // Deprecated: use `url.path` and `url.query` instead. - HTTPTargetKey = attribute.Key("http.target") - - // HTTPURLKey is the attribute Key conforming to the "http.url" semantic - // conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' - // Deprecated: use `url.full` instead. - HTTPURLKey = attribute.Key("http.url") - - // HTTPUserAgentKey is the attribute Key conforming to the - // "http.user_agent" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU - // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) - // Version/14.1.2 Mobile/15E148 Safari/604.1' - // Deprecated: use `user_agent.original` instead. - HTTPUserAgentKey = attribute.Key("http.user_agent") -) - -var ( - // HTTP/1.0 - // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") - // HTTP/1.1 - // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") - // HTTP/2 - // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") - // HTTP/3 - // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") - // SPDY protocol - // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") - // QUIC protocol - // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") -) - -// HTTPMethod returns an attribute KeyValue conforming to the "http.method" -// semantic conventions. -// -// Deprecated: use `http.request.method` instead. 
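Because every key in this block is deprecated, a short migration sketch may help: the same HTTP call recorded once with the old names and once with the replacements named in the deprecation notes (http.request.method, http.response.status_code, url.full). The keys are written literally here; newer semconv releases export constants for the replacement names.

package main

import (
	"fmt"
	"net/http"

	"go.opentelemetry.io/otel/attribute"
)

func httpAttrs(req *http.Request, status int) (deprecated, replacement []attribute.KeyValue) {
	deprecated = []attribute.KeyValue{
		attribute.Key("http.method").String(req.Method),
		attribute.Key("http.status_code").Int(status),
		attribute.Key("http.url").String(req.URL.String()),
	}
	replacement = []attribute.KeyValue{
		attribute.Key("http.request.method").String(req.Method),
		attribute.Key("http.response.status_code").Int(status),
		attribute.Key("url.full").String(req.URL.String()),
	}
	return deprecated, replacement
}

func main() {
	req, _ := http.NewRequest(http.MethodGet, "https://www.foo.bar/search?q=OpenTelemetry", nil)
	oldAttrs, newAttrs := httpAttrs(req, 200)
	fmt.Println(len(oldAttrs), len(newAttrs))
}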
-func HTTPMethod(val string) attribute.KeyValue { - return HTTPMethodKey.String(val) -} - -// HTTPRequestContentLength returns an attribute KeyValue conforming to the -// "http.request_content_length" semantic conventions. -// -// Deprecated: use `http.request.header.content-length` instead. -func HTTPRequestContentLength(val int) attribute.KeyValue { - return HTTPRequestContentLengthKey.Int(val) -} - -// HTTPResponseContentLength returns an attribute KeyValue conforming to the -// "http.response_content_length" semantic conventions. -// -// Deprecated: use `http.response.header.content-length` instead. -func HTTPResponseContentLength(val int) attribute.KeyValue { - return HTTPResponseContentLengthKey.Int(val) -} - -// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" -// semantic conventions. -// -// Deprecated: use `url.scheme` instead. -func HTTPScheme(val string) attribute.KeyValue { - return HTTPSchemeKey.String(val) -} - -// HTTPStatusCode returns an attribute KeyValue conforming to the -// "http.status_code" semantic conventions. -// -// Deprecated: use `http.response.status_code` instead. -func HTTPStatusCode(val int) attribute.KeyValue { - return HTTPStatusCodeKey.Int(val) -} - -// HTTPTarget returns an attribute KeyValue conforming to the "http.target" -// semantic conventions. -// -// Deprecated: use `url.path` and `url.query` instead. -func HTTPTarget(val string) attribute.KeyValue { - return HTTPTargetKey.String(val) -} - -// HTTPURL returns an attribute KeyValue conforming to the "http.url" -// semantic conventions. -// -// Deprecated: use `url.full` instead. -func HTTPURL(val string) attribute.KeyValue { - return HTTPURLKey.String(val) -} - -// HTTPUserAgent returns an attribute KeyValue conforming to the -// "http.user_agent" semantic conventions. -// -// Deprecated: use `user_agent.original` instead. -func HTTPUserAgent(val string) attribute.KeyValue { - return HTTPUserAgentKey.String(val) -} - -// These attributes may be used for any network related operation. -const ( - // NetHostNameKey is the attribute Key conforming to the "net.host.name" - // semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'example.com' - // Deprecated: use `server.address`. - NetHostNameKey = attribute.Key("net.host.name") - - // NetHostPortKey is the attribute Key conforming to the "net.host.port" - // semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 8080 - // Deprecated: use `server.port`. - NetHostPortKey = attribute.Key("net.host.port") - - // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" - // semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'example.com' - // Deprecated: use `server.address` on client spans and `client.address` on - // server spans. - NetPeerNameKey = attribute.Key("net.peer.name") - - // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" - // semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 8080 - // Deprecated: use `server.port` on client spans and `client.port` on - // server spans. - NetPeerPortKey = attribute.Key("net.peer.port") - - // NetProtocolNameKey is the attribute Key conforming to the - // "net.protocol.name" semantic conventions. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'amqp', 'http', 'mqtt' - // Deprecated: use `network.protocol.name`. - NetProtocolNameKey = attribute.Key("net.protocol.name") - - // NetProtocolVersionKey is the attribute Key conforming to the - // "net.protocol.version" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '3.1.1' - // Deprecated: use `network.protocol.version`. - NetProtocolVersionKey = attribute.Key("net.protocol.version") - - // NetSockFamilyKey is the attribute Key conforming to the - // "net.sock.family" semantic conventions. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: deprecated - // Deprecated: use `network.transport` and `network.type`. - NetSockFamilyKey = attribute.Key("net.sock.family") - - // NetSockHostAddrKey is the attribute Key conforming to the - // "net.sock.host.addr" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '/var/my.sock' - // Deprecated: use `network.local.address`. - NetSockHostAddrKey = attribute.Key("net.sock.host.addr") - - // NetSockHostPortKey is the attribute Key conforming to the - // "net.sock.host.port" semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 8080 - // Deprecated: use `network.local.port`. - NetSockHostPortKey = attribute.Key("net.sock.host.port") - - // NetSockPeerAddrKey is the attribute Key conforming to the - // "net.sock.peer.addr" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '192.168.0.1' - // Deprecated: use `network.peer.address`. - NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") - - // NetSockPeerNameKey is the attribute Key conforming to the - // "net.sock.peer.name" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '/var/my.sock' - // Deprecated: no replacement at this time. - NetSockPeerNameKey = attribute.Key("net.sock.peer.name") - - // NetSockPeerPortKey is the attribute Key conforming to the - // "net.sock.peer.port" semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 65531 - // Deprecated: use `network.peer.port`. - NetSockPeerPortKey = attribute.Key("net.sock.peer.port") - - // NetTransportKey is the attribute Key conforming to the "net.transport" - // semantic conventions. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: deprecated - // Deprecated: use `network.transport`. - NetTransportKey = attribute.Key("net.transport") -) - -var ( - // IPv4 address - // - // Deprecated: use `network.transport` and `network.type`. - NetSockFamilyInet = NetSockFamilyKey.String("inet") - // IPv6 address - // - // Deprecated: use `network.transport` and `network.type`. - NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") - // Unix domain socket path - // - // Deprecated: use `network.transport` and `network.type`. - NetSockFamilyUnix = NetSockFamilyKey.String("unix") -) - -var ( - // ip_tcp - // - // Deprecated: use `network.transport`. - NetTransportTCP = NetTransportKey.String("ip_tcp") - // ip_udp - // - // Deprecated: use `network.transport`. - NetTransportUDP = NetTransportKey.String("ip_udp") - // Named or anonymous pipe - // - // Deprecated: use `network.transport`. 
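The net.* keys in this block carry the same kind of deprecation notes, so a translation-table sketch may be clearer than prose. The mapping below is read straight from the comments above (with the caveat that net.peer.* maps to server.* on client spans and client.* on server spans); it is illustrative only.

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

// Deprecated key -> replacement named in its deprecation note.
var netReplacements = map[attribute.Key]attribute.Key{
	"net.host.name":        "server.address",
	"net.host.port":        "server.port",
	"net.peer.name":        "server.address", // on client spans; client.address on server spans
	"net.peer.port":        "server.port",    // on client spans; client.port on server spans
	"net.protocol.name":    "network.protocol.name",
	"net.protocol.version": "network.protocol.version",
	"net.sock.peer.addr":   "network.peer.address",
	"net.sock.peer.port":   "network.peer.port",
}

func migrate(kv attribute.KeyValue) attribute.KeyValue {
	if repl, ok := netReplacements[kv.Key]; ok {
		return attribute.KeyValue{Key: repl, Value: kv.Value}
	}
	return kv
}

func main() {
	old := attribute.Key("net.peer.name").String("example.com")
	fmt.Println(migrate(old).Key) // server.address
}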
- NetTransportPipe = NetTransportKey.String("pipe") - // In-process communication - // - // Deprecated: use `network.transport`. - NetTransportInProc = NetTransportKey.String("inproc") - // Something else (non IP-based) - // - // Deprecated: use `network.transport`. - NetTransportOther = NetTransportKey.String("other") -) - -// NetHostName returns an attribute KeyValue conforming to the -// "net.host.name" semantic conventions. -// -// Deprecated: use `server.address`. -func NetHostName(val string) attribute.KeyValue { - return NetHostNameKey.String(val) -} - -// NetHostPort returns an attribute KeyValue conforming to the -// "net.host.port" semantic conventions. -// -// Deprecated: use `server.port`. -func NetHostPort(val int) attribute.KeyValue { - return NetHostPortKey.Int(val) -} - -// NetPeerName returns an attribute KeyValue conforming to the -// "net.peer.name" semantic conventions. -// -// Deprecated: use `server.address` on client spans and `client.address` on -// server spans. -func NetPeerName(val string) attribute.KeyValue { - return NetPeerNameKey.String(val) -} - -// NetPeerPort returns an attribute KeyValue conforming to the -// "net.peer.port" semantic conventions. -// -// Deprecated: use `server.port` on client spans and `client.port` on server -// spans. -func NetPeerPort(val int) attribute.KeyValue { - return NetPeerPortKey.Int(val) -} - -// NetProtocolName returns an attribute KeyValue conforming to the -// "net.protocol.name" semantic conventions. -// -// Deprecated: use `network.protocol.name`. -func NetProtocolName(val string) attribute.KeyValue { - return NetProtocolNameKey.String(val) -} - -// NetProtocolVersion returns an attribute KeyValue conforming to the -// "net.protocol.version" semantic conventions. -// -// Deprecated: use `network.protocol.version`. -func NetProtocolVersion(val string) attribute.KeyValue { - return NetProtocolVersionKey.String(val) -} - -// NetSockHostAddr returns an attribute KeyValue conforming to the -// "net.sock.host.addr" semantic conventions. -// -// Deprecated: use `network.local.address`. -func NetSockHostAddr(val string) attribute.KeyValue { - return NetSockHostAddrKey.String(val) -} - -// NetSockHostPort returns an attribute KeyValue conforming to the -// "net.sock.host.port" semantic conventions. -// -// Deprecated: use `network.local.port`. -func NetSockHostPort(val int) attribute.KeyValue { - return NetSockHostPortKey.Int(val) -} - -// NetSockPeerAddr returns an attribute KeyValue conforming to the -// "net.sock.peer.addr" semantic conventions. -// -// Deprecated: use `network.peer.address`. -func NetSockPeerAddr(val string) attribute.KeyValue { - return NetSockPeerAddrKey.String(val) -} - -// NetSockPeerName returns an attribute KeyValue conforming to the -// "net.sock.peer.name" semantic conventions. -// -// Deprecated: no replacement at this time. -func NetSockPeerName(val string) attribute.KeyValue { - return NetSockPeerNameKey.String(val) -} - -// NetSockPeerPort returns an attribute KeyValue conforming to the -// "net.sock.peer.port" semantic conventions. -// -// Deprecated: use `network.peer.port`. -func NetSockPeerPort(val int) attribute.KeyValue { - return NetSockPeerPortKey.Int(val) -} - -// These attributes may be used to describe the receiver of a network -// exchange/packet. These should be used when there is no client/server -// relationship between the two sides, or when that relationship is unknown. -// This covers low-level network interactions (e.g. 
packet tracing) where you -// don't know if there was a connection or which side initiated it. This also -// covers unidirectional UDP flows and peer-to-peer communication where the -// "user-facing" surface of the protocol / API doesn't expose a clear notion of -// client and server. -const ( - // DestinationAddressKey is the attribute Key conforming to the - // "destination.address" semantic conventions. It represents the - // destination address - domain name if available without reverse DNS - // lookup; otherwise, IP address or Unix domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the source side, and when communicating through - // an intermediary, `destination.address` SHOULD represent the destination - // address behind any intermediaries, for example proxies, if it's - // available. - DestinationAddressKey = attribute.Key("destination.address") - - // DestinationPortKey is the attribute Key conforming to the - // "destination.port" semantic conventions. It represents the destination - // port number - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3389, 2888 - DestinationPortKey = attribute.Key("destination.port") -) - -// DestinationAddress returns an attribute KeyValue conforming to the -// "destination.address" semantic conventions. It represents the destination -// address - domain name if available without reverse DNS lookup; otherwise, IP -// address or Unix domain socket name. -func DestinationAddress(val string) attribute.KeyValue { - return DestinationAddressKey.String(val) -} - -// DestinationPort returns an attribute KeyValue conforming to the -// "destination.port" semantic conventions. It represents the destination port -// number -func DestinationPort(val int) attribute.KeyValue { - return DestinationPortKey.Int(val) -} - -// These attributes may be used for any disk related operation. -const ( - // DiskIoDirectionKey is the attribute Key conforming to the - // "disk.io.direction" semantic conventions. It represents the disk IO - // operation direction. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'read' - DiskIoDirectionKey = attribute.Key("disk.io.direction") -) - -var ( - // read - DiskIoDirectionRead = DiskIoDirectionKey.String("read") - // write - DiskIoDirectionWrite = DiskIoDirectionKey.String("write") -) - -// The shared attributes used to report an error. -const ( - // ErrorTypeKey is the attribute Key conforming to the "error.type" - // semantic conventions. It represents the describes a class of error the - // operation ended with. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'timeout', 'java.net.UnknownHostException', - // 'server_certificate_invalid', '500' - // Note: The `error.type` SHOULD be predictable and SHOULD have low - // cardinality. - // Instrumentations SHOULD document the list of errors they report. - // - // The cardinality of `error.type` within one instrumentation library - // SHOULD be low. - // Telemetry consumers that aggregate data from multiple instrumentation - // libraries and applications - // should be prepared for `error.type` to have high cardinality at query - // time when no - // additional filters are applied. - // - // If the operation has completed successfully, instrumentations SHOULD NOT - // set `error.type`. 
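The cardinality guidance above is easiest to see in code: derive error.type from a stable identifier for errors you recognise, fall back to the documented "_OTHER" value otherwise, and set nothing at all on success. A rough sketch follows, with literal key strings and an invented tracer name.

package main

import (
	"context"
	"errors"
	"net"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/codes"
)

func recordOutcome(ctx context.Context, err error) {
	_, span := otel.Tracer("example/errors").Start(ctx, "operation")
	defer span.End()
	if err == nil {
		return // successful operations SHOULD NOT set error.type
	}
	// Prefer a stable, low-cardinality identifier; "_OTHER" is the fallback.
	errType := "_OTHER"
	var opErr *net.OpError
	if errors.As(err, &opErr) {
		errType = "net.OpError"
	}
	span.RecordError(err)
	span.SetStatus(codes.Error, err.Error())
	span.SetAttributes(attribute.Key("error.type").String(errType))
}

func main() {
	recordOutcome(context.Background(), errors.New("boom"))
}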
- // - // If a specific domain defines its own set of error identifiers (such as - // HTTP or gRPC status codes), - // it's RECOMMENDED to: - // - // * Use a domain-specific attribute - // * Set `error.type` to capture all errors, regardless of whether they are - // defined within the domain-specific set or not. - ErrorTypeKey = attribute.Key("error.type") -) - -var ( - // A fallback error value to be used when the instrumentation doesn't define a custom value - ErrorTypeOther = ErrorTypeKey.String("_OTHER") -) - -// The shared attributes used to report a single exception associated with a -// span or log. -const ( - // ExceptionEscapedKey is the attribute Key conforming to the - // "exception.escaped" semantic conventions. It represents the sHOULD be - // set to true if the exception event is recorded at a point where it is - // known that the exception is escaping the scope of the span. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Note: An exception is considered to have escaped (or left) the scope of - // a span, - // if that span is ended while the exception is still logically "in - // flight". - // This may be actually "in flight" in some languages (e.g. if the - // exception - // is passed to a Context manager's `__exit__` method in Python) but will - // usually be caught at the point of recording the exception in most - // languages. - // - // It is usually not possible to determine at the point where an exception - // is thrown - // whether it will escape the scope of a span. - // However, it is trivial to know that an exception - // will escape, if one checks for an active exception just before ending - // the span, - // as done in the [example for recording span - // exceptions](#recording-an-exception). - // - // It follows that an exception may still escape the scope of the span - // even if the `exception.escaped` attribute was not set or set to false, - // since the event might have been recorded at a time where it was not - // clear whether the exception will escape. - ExceptionEscapedKey = attribute.Key("exception.escaped") - - // ExceptionMessageKey is the attribute Key conforming to the - // "exception.message" semantic conventions. It represents the exception - // message. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Division by zero', "Can't convert 'int' object to str - // implicitly" - ExceptionMessageKey = attribute.Key("exception.message") - - // ExceptionStacktraceKey is the attribute Key conforming to the - // "exception.stacktrace" semantic conventions. It represents a stacktrace - // as a string in the natural representation for the language runtime. The - // representation is to be determined and documented by each language SIG. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test - // exception\\n at ' - // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' - // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' - // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' - ExceptionStacktraceKey = attribute.Key("exception.stacktrace") - - // ExceptionTypeKey is the attribute Key conforming to the "exception.type" - // semantic conventions. It represents the type of the exception (its - // fully-qualified class name, if applicable). 
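// An illustrative sketch of how the error.type helper above and the
// exception.* helpers declared in this file are typically used when a span
// fails. The package name, tracer name, and function names are placeholders,
// and the semconv import path assumes the v1.24.0 module revision that
// declares these attributes.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/codes"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func doWork(ctx context.Context, work func(context.Context) error) error {
	ctx, span := otel.Tracer("example").Start(ctx, "doWork")
	defer span.End()

	if err := work(ctx); err != nil {
		// Keep error.type low-cardinality; _OTHER is the documented fallback
		// when no domain-specific identifier applies. The concrete details
		// travel on the exception event that RecordError records.
		span.SetAttributes(semconv.ErrorTypeOther)
		span.RecordError(err)
		span.SetStatus(codes.Error, err.Error())
		return err
	}
	return nil
}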
The dynamic type of the - // exception should be preferred over the static type in languages that - // support it. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'java.net.ConnectException', 'OSError' - ExceptionTypeKey = attribute.Key("exception.type") -) - -// ExceptionEscaped returns an attribute KeyValue conforming to the -// "exception.escaped" semantic conventions. It represents the sHOULD be set to -// true if the exception event is recorded at a point where it is known that -// the exception is escaping the scope of the span. -func ExceptionEscaped(val bool) attribute.KeyValue { - return ExceptionEscapedKey.Bool(val) -} - -// ExceptionMessage returns an attribute KeyValue conforming to the -// "exception.message" semantic conventions. It represents the exception -// message. -func ExceptionMessage(val string) attribute.KeyValue { - return ExceptionMessageKey.String(val) -} - -// ExceptionStacktrace returns an attribute KeyValue conforming to the -// "exception.stacktrace" semantic conventions. It represents a stacktrace as a -// string in the natural representation for the language runtime. The -// representation is to be determined and documented by each language SIG. -func ExceptionStacktrace(val string) attribute.KeyValue { - return ExceptionStacktraceKey.String(val) -} - -// ExceptionType returns an attribute KeyValue conforming to the -// "exception.type" semantic conventions. It represents the type of the -// exception (its fully-qualified class name, if applicable). The dynamic type -// of the exception should be preferred over the static type in languages that -// support it. -func ExceptionType(val string) attribute.KeyValue { - return ExceptionTypeKey.String(val) -} - -// Semantic convention attributes in the HTTP namespace. -const ( - // HTTPRequestBodySizeKey is the attribute Key conforming to the - // "http.request.body.size" semantic conventions. It represents the size of - // the request payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as - // the - // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) - // header. For requests using transport encoding, this should be the - // compressed size. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3495 - HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") - - // HTTPRequestMethodKey is the attribute Key conforming to the - // "http.request.method" semantic conventions. It represents the hTTP - // request method. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'GET', 'POST', 'HEAD' - // Note: HTTP request method value SHOULD be "known" to the - // instrumentation. - // By default, this convention defines "known" methods as the ones listed - // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods) - // and the PATCH method defined in - // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html). - // - // If the HTTP request method is not known to instrumentation, it MUST set - // the `http.request.method` attribute to `_OTHER`. - // - // If the HTTP instrumentation could end up converting valid HTTP request - // methods to `_OTHER`, then it MUST provide a way to override - // the list of known HTTP methods. 
If this override is done via environment - // variable, then the environment variable MUST be named - // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated - // list of case-sensitive known HTTP methods - // (this list MUST be a full override of the default known method, it is - // not a list of known methods in addition to the defaults). - // - // HTTP method names are case-sensitive and `http.request.method` attribute - // value MUST match a known HTTP method name exactly. - // Instrumentations for specific web frameworks that consider HTTP methods - // to be case insensitive, SHOULD populate a canonical equivalent. - // Tracing instrumentations that do so, MUST also set - // `http.request.method_original` to the original value. - HTTPRequestMethodKey = attribute.Key("http.request.method") - - // HTTPRequestMethodOriginalKey is the attribute Key conforming to the - // "http.request.method_original" semantic conventions. It represents the - // original HTTP method sent by the client in the request line. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'GeT', 'ACL', 'foo' - HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") - - // HTTPRequestResendCountKey is the attribute Key conforming to the - // "http.request.resend_count" semantic conventions. It represents the - // ordinal number of request resending attempt (for any reason, including - // redirects). - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 3 - // Note: The resend count SHOULD be updated each time an HTTP request gets - // resent by the client, regardless of what was the cause of the resending - // (e.g. redirection, authorization failure, 503 Server Unavailable, - // network issues, or any other). - HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") - - // HTTPResponseBodySizeKey is the attribute Key conforming to the - // "http.response.body.size" semantic conventions. It represents the size - // of the response payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as - // the - // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) - // header. For requests using transport encoding, this should be the - // compressed size. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3495 - HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") - - // HTTPResponseStatusCodeKey is the attribute Key conforming to the - // "http.response.status_code" semantic conventions. It represents the - // [HTTP response status - // code](https://tools.ietf.org/html/rfc7231#section-6). - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 200 - HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") - - // HTTPRouteKey is the attribute Key conforming to the "http.route" - // semantic conventions. It represents the matched route, that is, the path - // template in the format used by the respective server framework. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/users/:userID?', '{controller}/{action}/{id?}' - // Note: MUST NOT be populated when this is not supported by the HTTP - // server framework as the route attribute should have low-cardinality and - // the URI path can NOT substitute it. 
- // SHOULD include the [application - // root](/docs/http/http-spans.md#http-server-definitions) if there is one. - HTTPRouteKey = attribute.Key("http.route") -) - -var ( - // CONNECT method - HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") - // DELETE method - HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") - // GET method - HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") - // HEAD method - HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") - // OPTIONS method - HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") - // PATCH method - HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") - // POST method - HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") - // PUT method - HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") - // TRACE method - HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") - // Any HTTP method that the instrumentation has no prior knowledge of - HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") -) - -// HTTPRequestBodySize returns an attribute KeyValue conforming to the -// "http.request.body.size" semantic conventions. It represents the size of the -// request payload body in bytes. This is the number of bytes transferred -// excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPRequestBodySize(val int) attribute.KeyValue { - return HTTPRequestBodySizeKey.Int(val) -} - -// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the -// "http.request.method_original" semantic conventions. It represents the -// original HTTP method sent by the client in the request line. -func HTTPRequestMethodOriginal(val string) attribute.KeyValue { - return HTTPRequestMethodOriginalKey.String(val) -} - -// HTTPRequestResendCount returns an attribute KeyValue conforming to the -// "http.request.resend_count" semantic conventions. It represents the ordinal -// number of request resending attempt (for any reason, including redirects). -func HTTPRequestResendCount(val int) attribute.KeyValue { - return HTTPRequestResendCountKey.Int(val) -} - -// HTTPResponseBodySize returns an attribute KeyValue conforming to the -// "http.response.body.size" semantic conventions. It represents the size of -// the response payload body in bytes. This is the number of bytes transferred -// excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPResponseBodySize(val int) attribute.KeyValue { - return HTTPResponseBodySizeKey.Int(val) -} - -// HTTPResponseStatusCode returns an attribute KeyValue conforming to the -// "http.response.status_code" semantic conventions. It represents the [HTTP -// response status code](https://tools.ietf.org/html/rfc7231#section-6). -func HTTPResponseStatusCode(val int) attribute.KeyValue { - return HTTPResponseStatusCodeKey.Int(val) -} - -// HTTPRoute returns an attribute KeyValue conforming to the "http.route" -// semantic conventions. It represents the matched route, that is, the path -// template in the format used by the respective server framework. 
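// An illustrative sketch of how the HTTP attributes above are typically set on
// a server span. The package, tracer name, route, and status value are
// placeholders, and the semconv import path assumes the v1.24.0 module
// revision that declares these helpers.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

func recordHTTPServerSpan(ctx context.Context) {
	// Span name pairs the method with the low-cardinality route template.
	_, span := otel.Tracer("example").Start(ctx, "GET /users/{id}",
		trace.WithSpanKind(trace.SpanKindServer),
		trace.WithAttributes(
			semconv.HTTPRequestMethodGet,     // known method enum; unknown methods map to _OTHER
			semconv.HTTPRoute("/users/{id}"), // route template, not the raw URI path
			semconv.HTTPResponseStatusCode(200),
		),
	)
	defer span.End()
}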
-func HTTPRoute(val string) attribute.KeyValue { - return HTTPRouteKey.String(val) -} - -// Attributes describing telemetry around messaging systems and messaging -// activities. -const ( - // MessagingBatchMessageCountKey is the attribute Key conforming to the - // "messaging.batch.message_count" semantic conventions. It represents the - // number of messages sent, received, or processed in the scope of the - // batching operation. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 1, 2 - // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on - // spans that operate with a single message. When a messaging client - // library supports both batch and single-message API for the same - // operation, instrumentations SHOULD use `messaging.batch.message_count` - // for batching APIs and SHOULD NOT use it for single-message APIs. - MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") - - // MessagingClientIDKey is the attribute Key conforming to the - // "messaging.client_id" semantic conventions. It represents a unique - // identifier for the client that consumes or produces a message. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'client-5', 'myhost@8742@s8083jm' - MessagingClientIDKey = attribute.Key("messaging.client_id") - - // MessagingDestinationAnonymousKey is the attribute Key conforming to the - // "messaging.destination.anonymous" semantic conventions. It represents a - // boolean that is true if the message destination is anonymous (could be - // unnamed or have auto-generated name). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") - - // MessagingDestinationNameKey is the attribute Key conforming to the - // "messaging.destination.name" semantic conventions. It represents the - // message destination name - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MyQueue', 'MyTopic' - // Note: Destination name SHOULD uniquely identify a specific queue, topic - // or other entity within the broker. If - // the broker doesn't have such notion, the destination name SHOULD - // uniquely identify the broker. - MessagingDestinationNameKey = attribute.Key("messaging.destination.name") - - // MessagingDestinationTemplateKey is the attribute Key conforming to the - // "messaging.destination.template" semantic conventions. It represents the - // low cardinality representation of the messaging destination name - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/customers/{customerID}' - // Note: Destination names could be constructed from templates. An example - // would be a destination name involving a user name or product id. - // Although the destination name in this case is of high cardinality, the - // underlying template is of low cardinality and can be effectively used - // for grouping and aggregation. - MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") - - // MessagingDestinationTemporaryKey is the attribute Key conforming to the - // "messaging.destination.temporary" semantic conventions. It represents a - // boolean that is true if the message destination is temporary and might - // not exist anymore after messages are processed. 
- // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") - - // MessagingDestinationPublishAnonymousKey is the attribute Key conforming - // to the "messaging.destination_publish.anonymous" semantic conventions. - // It represents a boolean that is true if the publish message destination - // is anonymous (could be unnamed or have auto-generated name). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous") - - // MessagingDestinationPublishNameKey is the attribute Key conforming to - // the "messaging.destination_publish.name" semantic conventions. It - // represents the name of the original destination the message was - // published to - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MyQueue', 'MyTopic' - // Note: The name SHOULD uniquely identify a specific queue, topic, or - // other entity within the broker. If - // the broker doesn't have such notion, the original destination name - // SHOULD uniquely identify the broker. - MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name") - - // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming - // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. - // It represents the ordering key for a given message. If the attribute is - // not present, the message does not have an ordering key. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ordering_key' - MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key") - - // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the - // "messaging.kafka.consumer.group" semantic conventions. It represents the - // name of the Kafka Consumer Group that is handling the message. Only - // applies to consumers, not producers. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-group' - MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") - - // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to - // the "messaging.kafka.destination.partition" semantic conventions. It - // represents the partition the message is sent to. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 2 - MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") - - // MessagingKafkaMessageKeyKey is the attribute Key conforming to the - // "messaging.kafka.message.key" semantic conventions. It represents the - // message keys in Kafka are used for grouping alike messages to ensure - // they're processed on the same partition. They differ from - // `messaging.message.id` in that they're not unique. If the key is `null`, - // the attribute MUST NOT be set. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myKey' - // Note: If the key type is not string, it's string representation has to - // be supplied for the attribute. If the key has no unambiguous, canonical - // string form, don't include its value. 
- MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") - - // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the - // "messaging.kafka.message.offset" semantic conventions. It represents the - // offset of a record in the corresponding Kafka partition. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") - - // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the - // "messaging.kafka.message.tombstone" semantic conventions. It represents - // a boolean that is true if the message is a tombstone. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") - - // MessagingMessageBodySizeKey is the attribute Key conforming to the - // "messaging.message.body.size" semantic conventions. It represents the - // size of the message body in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1439 - // Note: This can refer to both the compressed or uncompressed body size. - // If both sizes are known, the uncompressed - // body size should be used. - MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size") - - // MessagingMessageConversationIDKey is the attribute Key conforming to the - // "messaging.message.conversation_id" semantic conventions. It represents - // the conversation ID identifying the conversation to which the message - // belongs, represented as a string. Sometimes called "Correlation ID". - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MyConversationID' - MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") - - // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the - // "messaging.message.envelope.size" semantic conventions. It represents - // the size of the message body and metadata in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 2738 - // Note: This can refer to both the compressed or uncompressed size. If - // both sizes are known, the uncompressed - // size should be used. - MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size") - - // MessagingMessageIDKey is the attribute Key conforming to the - // "messaging.message.id" semantic conventions. It represents a value used - // by the messaging system as an identifier for the message, represented as - // a string. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '452a7c7c7c7048c2f887f61572b18fc2' - MessagingMessageIDKey = attribute.Key("messaging.message.id") - - // MessagingOperationKey is the attribute Key conforming to the - // "messaging.operation" semantic conventions. It represents a string - // identifying the kind of messaging operation. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: If a custom value is used, it MUST be of low cardinality. - MessagingOperationKey = attribute.Key("messaging.operation") - - // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key - // conforming to the "messaging.rabbitmq.destination.routing_key" semantic - // conventions. It represents the rabbitMQ message routing key. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myKey' - MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") - - // MessagingRocketmqClientGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.client_group" semantic conventions. It represents - // the name of the RocketMQ producer/consumer group that is handling the - // message. The client type is identified by the SpanKind. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myConsumerGroup' - MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") - - // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to - // the "messaging.rocketmq.consumption_model" semantic conventions. It - // represents the model of message consumption. This only applies to - // consumer spans. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") - - // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delay_time_level" semantic - // conventions. It represents the delay time level for delay message, which - // determines the message delay time. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3 - MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") - - // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delivery_timestamp" - // semantic conventions. It represents the timestamp in milliseconds that - // the delay message is expected to be delivered to consumer. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1665987217045 - MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") - - // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.message.group" semantic conventions. It represents - // the it is essential for FIFO message. Messages that belong to the same - // message group are always processed one by one within the same consumer - // group. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myMessageGroup' - MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") - - // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the - // "messaging.rocketmq.message.keys" semantic conventions. It represents - // the key(s) of message, another way to mark message besides message id. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'keyA', 'keyB' - MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") - - // MessagingRocketmqMessageTagKey is the attribute Key conforming to the - // "messaging.rocketmq.message.tag" semantic conventions. It represents the - // secondary classifier of message besides topic. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'tagA' - MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") - - // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the - // "messaging.rocketmq.message.type" semantic conventions. It represents - // the type of message. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") - - // MessagingRocketmqNamespaceKey is the attribute Key conforming to the - // "messaging.rocketmq.namespace" semantic conventions. It represents the - // namespace of RocketMQ resources, resources in different namespaces are - // individual. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myNamespace' - MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") - - // MessagingSystemKey is the attribute Key conforming to the - // "messaging.system" semantic conventions. It represents an identifier for - // the messaging system being used. See below for a list of well-known - // identifiers. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingSystemKey = attribute.Key("messaging.system") -) - -var ( - // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created - MessagingOperationPublish = MessagingOperationKey.String("publish") - // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios - MessagingOperationCreate = MessagingOperationKey.String("create") - // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages - MessagingOperationReceive = MessagingOperationKey.String("receive") - // One or more messages are passed to a consumer. 
This operation refers to push-based scenarios, where consumer register callbacks which get called by messaging SDKs - MessagingOperationDeliver = MessagingOperationKey.String("deliver") -) - -var ( - // Clustering consumption model - MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") - // Broadcasting consumption model - MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") -) - -var ( - // Normal message - MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") - // FIFO message - MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") - // Delay message - MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") - // Transaction message - MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") -) - -var ( - // Apache ActiveMQ - MessagingSystemActivemq = MessagingSystemKey.String("activemq") - // Amazon Simple Queue Service (SQS) - MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs") - // Azure Event Grid - MessagingSystemAzureEventgrid = MessagingSystemKey.String("azure_eventgrid") - // Azure Event Hubs - MessagingSystemAzureEventhubs = MessagingSystemKey.String("azure_eventhubs") - // Azure Service Bus - MessagingSystemAzureServicebus = MessagingSystemKey.String("azure_servicebus") - // Google Cloud Pub/Sub - MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub") - // Java Message Service - MessagingSystemJms = MessagingSystemKey.String("jms") - // Apache Kafka - MessagingSystemKafka = MessagingSystemKey.String("kafka") - // RabbitMQ - MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq") - // Apache RocketMQ - MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq") -) - -// MessagingBatchMessageCount returns an attribute KeyValue conforming to -// the "messaging.batch.message_count" semantic conventions. It represents the -// number of messages sent, received, or processed in the scope of the batching -// operation. -func MessagingBatchMessageCount(val int) attribute.KeyValue { - return MessagingBatchMessageCountKey.Int(val) -} - -// MessagingClientID returns an attribute KeyValue conforming to the -// "messaging.client_id" semantic conventions. It represents a unique -// identifier for the client that consumes or produces a message. -func MessagingClientID(val string) attribute.KeyValue { - return MessagingClientIDKey.String(val) -} - -// MessagingDestinationAnonymous returns an attribute KeyValue conforming to -// the "messaging.destination.anonymous" semantic conventions. It represents a -// boolean that is true if the message destination is anonymous (could be -// unnamed or have auto-generated name). -func MessagingDestinationAnonymous(val bool) attribute.KeyValue { - return MessagingDestinationAnonymousKey.Bool(val) -} - -// MessagingDestinationName returns an attribute KeyValue conforming to the -// "messaging.destination.name" semantic conventions. It represents the message -// destination name -func MessagingDestinationName(val string) attribute.KeyValue { - return MessagingDestinationNameKey.String(val) -} - -// MessagingDestinationTemplate returns an attribute KeyValue conforming to -// the "messaging.destination.template" semantic conventions. 
It represents the -// low cardinality representation of the messaging destination name -func MessagingDestinationTemplate(val string) attribute.KeyValue { - return MessagingDestinationTemplateKey.String(val) -} - -// MessagingDestinationTemporary returns an attribute KeyValue conforming to -// the "messaging.destination.temporary" semantic conventions. It represents a -// boolean that is true if the message destination is temporary and might not -// exist anymore after messages are processed. -func MessagingDestinationTemporary(val bool) attribute.KeyValue { - return MessagingDestinationTemporaryKey.Bool(val) -} - -// MessagingDestinationPublishAnonymous returns an attribute KeyValue -// conforming to the "messaging.destination_publish.anonymous" semantic -// conventions. It represents a boolean that is true if the publish message -// destination is anonymous (could be unnamed or have auto-generated name). -func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue { - return MessagingDestinationPublishAnonymousKey.Bool(val) -} - -// MessagingDestinationPublishName returns an attribute KeyValue conforming -// to the "messaging.destination_publish.name" semantic conventions. It -// represents the name of the original destination the message was published to -func MessagingDestinationPublishName(val string) attribute.KeyValue { - return MessagingDestinationPublishNameKey.String(val) -} - -// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue -// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic -// conventions. It represents the ordering key for a given message. If the -// attribute is not present, the message does not have an ordering key. -func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue { - return MessagingGCPPubsubMessageOrderingKeyKey.String(val) -} - -// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to -// the "messaging.kafka.consumer.group" semantic conventions. It represents the -// name of the Kafka Consumer Group that is handling the message. Only applies -// to consumers, not producers. -func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { - return MessagingKafkaConsumerGroupKey.String(val) -} - -// MessagingKafkaDestinationPartition returns an attribute KeyValue -// conforming to the "messaging.kafka.destination.partition" semantic -// conventions. It represents the partition the message is sent to. -func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { - return MessagingKafkaDestinationPartitionKey.Int(val) -} - -// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the -// "messaging.kafka.message.key" semantic conventions. It represents the -// message keys in Kafka are used for grouping alike messages to ensure they're -// processed on the same partition. They differ from `messaging.message.id` in -// that they're not unique. If the key is `null`, the attribute MUST NOT be -// set. -func MessagingKafkaMessageKey(val string) attribute.KeyValue { - return MessagingKafkaMessageKeyKey.String(val) -} - -// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to -// the "messaging.kafka.message.offset" semantic conventions. It represents the -// offset of a record in the corresponding Kafka partition. 
-func MessagingKafkaMessageOffset(val int) attribute.KeyValue { - return MessagingKafkaMessageOffsetKey.Int(val) -} - -// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming -// to the "messaging.kafka.message.tombstone" semantic conventions. It -// represents a boolean that is true if the message is a tombstone. -func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { - return MessagingKafkaMessageTombstoneKey.Bool(val) -} - -// MessagingMessageBodySize returns an attribute KeyValue conforming to the -// "messaging.message.body.size" semantic conventions. It represents the size -// of the message body in bytes. -func MessagingMessageBodySize(val int) attribute.KeyValue { - return MessagingMessageBodySizeKey.Int(val) -} - -// MessagingMessageConversationID returns an attribute KeyValue conforming -// to the "messaging.message.conversation_id" semantic conventions. It -// represents the conversation ID identifying the conversation to which the -// message belongs, represented as a string. Sometimes called "Correlation ID". -func MessagingMessageConversationID(val string) attribute.KeyValue { - return MessagingMessageConversationIDKey.String(val) -} - -// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to -// the "messaging.message.envelope.size" semantic conventions. It represents -// the size of the message body and metadata in bytes. -func MessagingMessageEnvelopeSize(val int) attribute.KeyValue { - return MessagingMessageEnvelopeSizeKey.Int(val) -} - -// MessagingMessageID returns an attribute KeyValue conforming to the -// "messaging.message.id" semantic conventions. It represents a value used by -// the messaging system as an identifier for the message, represented as a -// string. -func MessagingMessageID(val string) attribute.KeyValue { - return MessagingMessageIDKey.String(val) -} - -// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue -// conforming to the "messaging.rabbitmq.destination.routing_key" semantic -// conventions. It represents the rabbitMQ message routing key. -func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { - return MessagingRabbitmqDestinationRoutingKeyKey.String(val) -} - -// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.client_group" semantic conventions. It represents -// the name of the RocketMQ producer/consumer group that is handling the -// message. The client type is identified by the SpanKind. -func MessagingRocketmqClientGroup(val string) attribute.KeyValue { - return MessagingRocketmqClientGroupKey.String(val) -} - -// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delay_time_level" semantic -// conventions. It represents the delay time level for delay message, which -// determines the message delay time. -func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { - return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) -} - -// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic -// conventions. It represents the timestamp in milliseconds that the delay -// message is expected to be delivered to consumer. 
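// An illustrative sketch of a consumer span annotated with the messaging
// helpers defined in this file. The package, tracer name, topic, group, and
// offset are placeholders, and the semconv import path assumes the v1.24.0
// module revision that declares these helpers.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

func recordKafkaReceiveSpan(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "orders receive",
		trace.WithSpanKind(trace.SpanKindConsumer),
		trace.WithAttributes(
			semconv.MessagingSystemKafka,               // well-known system identifier
			semconv.MessagingOperationReceive,          // pull-based consumption
			semconv.MessagingDestinationName("orders"), // queue or topic name
			semconv.MessagingKafkaConsumerGroup("billing"),
			semconv.MessagingKafkaMessageOffset(42),
		),
	)
	defer span.End()
}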
-func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { - return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) -} - -// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.group" semantic conventions. It represents -// the it is essential for FIFO message. Messages that belong to the same -// message group are always processed one by one within the same consumer -// group. -func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { - return MessagingRocketmqMessageGroupKey.String(val) -} - -// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.keys" semantic conventions. It represents -// the key(s) of message, another way to mark message besides message id. -func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { - return MessagingRocketmqMessageKeysKey.StringSlice(val) -} - -// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.tag" semantic conventions. It represents the -// secondary classifier of message besides topic. -func MessagingRocketmqMessageTag(val string) attribute.KeyValue { - return MessagingRocketmqMessageTagKey.String(val) -} - -// MessagingRocketmqNamespace returns an attribute KeyValue conforming to -// the "messaging.rocketmq.namespace" semantic conventions. It represents the -// namespace of RocketMQ resources, resources in different namespaces are -// individual. -func MessagingRocketmqNamespace(val string) attribute.KeyValue { - return MessagingRocketmqNamespaceKey.String(val) -} - -// These attributes may be used for any network related operation. -const ( - // NetworkCarrierIccKey is the attribute Key conforming to the - // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 - // alpha-2 2-character country code associated with the mobile carrier - // network. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'DE' - NetworkCarrierIccKey = attribute.Key("network.carrier.icc") - - // NetworkCarrierMccKey is the attribute Key conforming to the - // "network.carrier.mcc" semantic conventions. It represents the mobile - // carrier country code. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '310' - NetworkCarrierMccKey = attribute.Key("network.carrier.mcc") - - // NetworkCarrierMncKey is the attribute Key conforming to the - // "network.carrier.mnc" semantic conventions. It represents the mobile - // carrier network code. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '001' - NetworkCarrierMncKey = attribute.Key("network.carrier.mnc") - - // NetworkCarrierNameKey is the attribute Key conforming to the - // "network.carrier.name" semantic conventions. It represents the name of - // the mobile carrier. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'sprint' - NetworkCarrierNameKey = attribute.Key("network.carrier.name") - - // NetworkConnectionSubtypeKey is the attribute Key conforming to the - // "network.connection.subtype" semantic conventions. It represents the - // this describes more details regarding the connection.type. It may be the - // type of cell technology connection, but it could be used for describing - // details about a wifi connection. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'LTE' - NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") - - // NetworkConnectionTypeKey is the attribute Key conforming to the - // "network.connection.type" semantic conventions. It represents the - // internet connection type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'wifi' - NetworkConnectionTypeKey = attribute.Key("network.connection.type") - - // NetworkIoDirectionKey is the attribute Key conforming to the - // "network.io.direction" semantic conventions. It represents the network - // IO operation direction. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'transmit' - NetworkIoDirectionKey = attribute.Key("network.io.direction") - - // NetworkLocalAddressKey is the attribute Key conforming to the - // "network.local.address" semantic conventions. It represents the local - // address of the network connection - IP address or Unix domain socket - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '10.1.2.80', '/tmp/my.sock' - NetworkLocalAddressKey = attribute.Key("network.local.address") - - // NetworkLocalPortKey is the attribute Key conforming to the - // "network.local.port" semantic conventions. It represents the local port - // number of the network connection. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - NetworkLocalPortKey = attribute.Key("network.local.port") - - // NetworkPeerAddressKey is the attribute Key conforming to the - // "network.peer.address" semantic conventions. It represents the peer - // address of the network connection - IP address or Unix domain socket - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '10.1.2.80', '/tmp/my.sock' - NetworkPeerAddressKey = attribute.Key("network.peer.address") - - // NetworkPeerPortKey is the attribute Key conforming to the - // "network.peer.port" semantic conventions. It represents the peer port - // number of the network connection. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - NetworkPeerPortKey = attribute.Key("network.peer.port") - - // NetworkProtocolNameKey is the attribute Key conforming to the - // "network.protocol.name" semantic conventions. It represents the [OSI - // application layer](https://osi-model.com/application-layer/) or non-OSI - // equivalent. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'amqp', 'http', 'mqtt' - // Note: The value SHOULD be normalized to lowercase. - NetworkProtocolNameKey = attribute.Key("network.protocol.name") - - // NetworkProtocolVersionKey is the attribute Key conforming to the - // "network.protocol.version" semantic conventions. It represents the - // version of the protocol specified in `network.protocol.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '3.1.1' - // Note: `network.protocol.version` refers to the version of the protocol - // used and might be different from the protocol client's version. If the - // HTTP client has a version of `0.27.2`, but sends HTTP version `1.1`, - // this attribute should be set to `1.1`. 
- NetworkProtocolVersionKey = attribute.Key("network.protocol.version") - - // NetworkTransportKey is the attribute Key conforming to the - // "network.transport" semantic conventions. It represents the [OSI - // transport layer](https://osi-model.com/transport-layer/) or - // [inter-process communication - // method](https://wikipedia.org/wiki/Inter-process_communication). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'tcp', 'udp' - // Note: The value SHOULD be normalized to lowercase. - // - // Consider always setting the transport when setting a port number, since - // a port number is ambiguous without knowing the transport. For example - // different processes could be listening on TCP port 12345 and UDP port - // 12345. - NetworkTransportKey = attribute.Key("network.transport") - - // NetworkTypeKey is the attribute Key conforming to the "network.type" - // semantic conventions. It represents the [OSI network - // layer](https://osi-model.com/network-layer/) or non-OSI equivalent. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'ipv4', 'ipv6' - // Note: The value SHOULD be normalized to lowercase. - NetworkTypeKey = attribute.Key("network.type") -) - -var ( - // GPRS - NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") - // EDGE - NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") - // UMTS - NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") - // CDMA - NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") - // EVDO Rel. 0 - NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") - // EVDO Rev. A - NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") - // CDMA2000 1XRTT - NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") - // HSDPA - NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") - // HSUPA - NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") - // HSPA - NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") - // IDEN - NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") - // EVDO Rev. 
B - NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") - // LTE - NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") - // EHRPD - NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") - // HSPAP - NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") - // GSM - NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") - // TD-SCDMA - NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") - // IWLAN - NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") - // 5G NR (New Radio) - NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") - // 5G NRNSA (New Radio Non-Standalone) - NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") - // LTE CA - NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") -) - -var ( - // wifi - NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") - // wired - NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") - // cell - NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") - // unavailable - NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") - // unknown - NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") -) - -var ( - // transmit - NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit") - // receive - NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive") -) - -var ( - // TCP - NetworkTransportTCP = NetworkTransportKey.String("tcp") - // UDP - NetworkTransportUDP = NetworkTransportKey.String("udp") - // Named or anonymous pipe - NetworkTransportPipe = NetworkTransportKey.String("pipe") - // Unix domain socket - NetworkTransportUnix = NetworkTransportKey.String("unix") -) - -var ( - // IPv4 - NetworkTypeIpv4 = NetworkTypeKey.String("ipv4") - // IPv6 - NetworkTypeIpv6 = NetworkTypeKey.String("ipv6") -) - -// NetworkCarrierIcc returns an attribute KeyValue conforming to the -// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 -// alpha-2 2-character country code associated with the mobile carrier network. -func NetworkCarrierIcc(val string) attribute.KeyValue { - return NetworkCarrierIccKey.String(val) -} - -// NetworkCarrierMcc returns an attribute KeyValue conforming to the -// "network.carrier.mcc" semantic conventions. It represents the mobile carrier -// country code. -func NetworkCarrierMcc(val string) attribute.KeyValue { - return NetworkCarrierMccKey.String(val) -} - -// NetworkCarrierMnc returns an attribute KeyValue conforming to the -// "network.carrier.mnc" semantic conventions. It represents the mobile carrier -// network code. -func NetworkCarrierMnc(val string) attribute.KeyValue { - return NetworkCarrierMncKey.String(val) -} - -// NetworkCarrierName returns an attribute KeyValue conforming to the -// "network.carrier.name" semantic conventions. It represents the name of the -// mobile carrier. -func NetworkCarrierName(val string) attribute.KeyValue { - return NetworkCarrierNameKey.String(val) -} - -// NetworkLocalAddress returns an attribute KeyValue conforming to the -// "network.local.address" semantic conventions. It represents the local -// address of the network connection - IP address or Unix domain socket name. 
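// An illustrative sketch of enriching an existing span with the network
// helpers in this file. The values are placeholders, and the semconv import
// path assumes the v1.24.0 module revision that declares these helpers.
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

func annotateConnection(span trace.Span) {
	span.SetAttributes(
		semconv.NetworkTransportTCP, // set the transport whenever a port is recorded
		semconv.NetworkTypeIpv4,
		semconv.NetworkProtocolName("http"), // normalized to lowercase
		semconv.NetworkProtocolVersion("1.1"),
		semconv.NetworkPeerAddress("10.1.2.80"),
		semconv.NetworkPeerPort(65123),
	)
}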
-func NetworkLocalAddress(val string) attribute.KeyValue { - return NetworkLocalAddressKey.String(val) -} - -// NetworkLocalPort returns an attribute KeyValue conforming to the -// "network.local.port" semantic conventions. It represents the local port -// number of the network connection. -func NetworkLocalPort(val int) attribute.KeyValue { - return NetworkLocalPortKey.Int(val) -} - -// NetworkPeerAddress returns an attribute KeyValue conforming to the -// "network.peer.address" semantic conventions. It represents the peer address -// of the network connection - IP address or Unix domain socket name. -func NetworkPeerAddress(val string) attribute.KeyValue { - return NetworkPeerAddressKey.String(val) -} - -// NetworkPeerPort returns an attribute KeyValue conforming to the -// "network.peer.port" semantic conventions. It represents the peer port number -// of the network connection. -func NetworkPeerPort(val int) attribute.KeyValue { - return NetworkPeerPortKey.Int(val) -} - -// NetworkProtocolName returns an attribute KeyValue conforming to the -// "network.protocol.name" semantic conventions. It represents the [OSI -// application layer](https://osi-model.com/application-layer/) or non-OSI -// equivalent. -func NetworkProtocolName(val string) attribute.KeyValue { - return NetworkProtocolNameKey.String(val) -} - -// NetworkProtocolVersion returns an attribute KeyValue conforming to the -// "network.protocol.version" semantic conventions. It represents the version -// of the protocol specified in `network.protocol.name`. -func NetworkProtocolVersion(val string) attribute.KeyValue { - return NetworkProtocolVersionKey.String(val) -} - -// Attributes for remote procedure calls. -const ( - // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the - // "rpc.connect_rpc.error_code" semantic conventions. It represents the - // [error codes](https://connect.build/docs/protocol/#error-codes) of the - // Connect request. Error codes are always string values. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") - - // RPCGRPCStatusCodeKey is the attribute Key conforming to the - // "rpc.grpc.status_code" semantic conventions. It represents the [numeric - // status - // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of - // the gRPC request. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") - - // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_code" semantic conventions. It represents the - // `error.code` property of response if it is an error response. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: -32700, 100 - RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") - - // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_message" semantic conventions. It represents the - // `error.message` property of response if it is an error response. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Parse error', 'User already exists' - RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") - - // RPCJsonrpcRequestIDKey is the attribute Key conforming to the - // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` - // property of request or response. 
Since protocol allows id to be int, - // string, `null` or missing (for notifications), value is expected to be - // cast to string for simplicity. Use empty string in case of `null` value. - // Omit entirely if this is a notification. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '10', 'request-7', '' - RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") - - // RPCJsonrpcVersionKey is the attribute Key conforming to the - // "rpc.jsonrpc.version" semantic conventions. It represents the protocol - // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 - // doesn't specify this, the value can be omitted. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2.0', '1.0' - RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") - - // RPCMethodKey is the attribute Key conforming to the "rpc.method" - // semantic conventions. It represents the name of the (logical) method - // being called, must be equal to the $method part in the span name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'exampleMethod' - // Note: This is the logical name of the method from the RPC interface - // perspective, which can be different from the name of any implementing - // method/function. The `code.function` attribute may be used to store the - // latter (e.g., method actually executing the call on the server side, RPC - // client stub method on the client side). - RPCMethodKey = attribute.Key("rpc.method") - - // RPCServiceKey is the attribute Key conforming to the "rpc.service" - // semantic conventions. It represents the full (logical) name of the - // service being called, including its package name, if applicable. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myservice.EchoService' - // Note: This is the logical name of the service from the RPC interface - // perspective, which can be different from the name of any implementing - // class. The `code.namespace` attribute may be used to store the latter - // (despite the attribute name, it may include a class name; e.g., class - // with method actually executing the call on the server side, RPC client - // stub class on the client side). - RPCServiceKey = attribute.Key("rpc.service") - - // RPCSystemKey is the attribute Key conforming to the "rpc.system" - // semantic conventions. It represents a string identifying the remoting - // system. See below for a list of well-known identifiers. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCSystemKey = attribute.Key("rpc.system") -) - -var ( - // cancelled - RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") - // unknown - RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") - // invalid_argument - RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") - // deadline_exceeded - RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") - // not_found - RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") - // already_exists - RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") - // permission_denied - RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") - // resource_exhausted - RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") - // failed_precondition - RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") - // aborted - RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") - // out_of_range - RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") - // unimplemented - RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") - // internal - RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") - // unavailable - RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") - // data_loss - RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") - // unauthenticated - RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") -) - -var ( - // OK - RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) - // CANCELLED - RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) - // UNKNOWN - RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) - // INVALID_ARGUMENT - RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) - // DEADLINE_EXCEEDED - RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) - // NOT_FOUND - RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) - // ALREADY_EXISTS - RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) - // PERMISSION_DENIED - RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) - // RESOURCE_EXHAUSTED - RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) - // FAILED_PRECONDITION - RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) - // ABORTED - RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) - // OUT_OF_RANGE - RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) - // UNIMPLEMENTED - RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) - // INTERNAL - RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) - // UNAVAILABLE - RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) - // DATA_LOSS - RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) - // UNAUTHENTICATED - RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) -) - -var ( - // gRPC - RPCSystemGRPC = RPCSystemKey.String("grpc") - // Java RMI - RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") - // .NET WCF - RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") - // Apache Dubbo - RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") - // Connect RPC - RPCSystemConnectRPC = 
RPCSystemKey.String("connect_rpc") -) - -// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_code" semantic conventions. It represents the -// `error.code` property of response if it is an error response. -func RPCJsonrpcErrorCode(val int) attribute.KeyValue { - return RPCJsonrpcErrorCodeKey.Int(val) -} - -// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_message" semantic conventions. It represents the -// `error.message` property of response if it is an error response. -func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { - return RPCJsonrpcErrorMessageKey.String(val) -} - -// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` -// property of request or response. Since protocol allows id to be int, string, -// `null` or missing (for notifications), value is expected to be cast to -// string for simplicity. Use empty string in case of `null` value. Omit -// entirely if this is a notification. -func RPCJsonrpcRequestID(val string) attribute.KeyValue { - return RPCJsonrpcRequestIDKey.String(val) -} - -// RPCJsonrpcVersion returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.version" semantic conventions. It represents the protocol -// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 -// doesn't specify this, the value can be omitted. -func RPCJsonrpcVersion(val string) attribute.KeyValue { - return RPCJsonrpcVersionKey.String(val) -} - -// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" -// semantic conventions. It represents the name of the (logical) method being -// called, must be equal to the $method part in the span name. -func RPCMethod(val string) attribute.KeyValue { - return RPCMethodKey.String(val) -} - -// RPCService returns an attribute KeyValue conforming to the "rpc.service" -// semantic conventions. It represents the full (logical) name of the service -// being called, including its package name, if applicable. -func RPCService(val string) attribute.KeyValue { - return RPCServiceKey.String(val) -} - -// These attributes may be used to describe the server in a connection-based -// network interaction where there is one side that initiates the connection -// (the client is the side that initiates the connection). This covers all TCP -// network interactions since TCP is connection-based and one side initiates -// the connection (an exception is made for peer-to-peer communication over TCP -// where the "user-facing" surface of the protocol / API doesn't expose a clear -// notion of client and server). This also covers UDP network interactions -// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. -const ( - // ServerAddressKey is the attribute Key conforming to the "server.address" - // semantic conventions. It represents the server domain name if available - // without reverse DNS lookup; otherwise, IP address or Unix domain socket - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the client side, and when communicating through - // an intermediary, `server.address` SHOULD represent the server address - // behind any intermediaries, for example proxies, if it's available. 
- ServerAddressKey = attribute.Key("server.address") - - // ServerPortKey is the attribute Key conforming to the "server.port" - // semantic conventions. It represents the server port number. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 80, 8080, 443 - // Note: When observed from the client side, and when communicating through - // an intermediary, `server.port` SHOULD represent the server port behind - // any intermediaries, for example proxies, if it's available. - ServerPortKey = attribute.Key("server.port") -) - -// ServerAddress returns an attribute KeyValue conforming to the -// "server.address" semantic conventions. It represents the server domain name -// if available without reverse DNS lookup; otherwise, IP address or Unix -// domain socket name. -func ServerAddress(val string) attribute.KeyValue { - return ServerAddressKey.String(val) -} - -// ServerPort returns an attribute KeyValue conforming to the "server.port" -// semantic conventions. It represents the server port number. -func ServerPort(val int) attribute.KeyValue { - return ServerPortKey.Int(val) -} - -// These attributes may be used to describe the sender of a network -// exchange/packet. These should be used when there is no client/server -// relationship between the two sides, or when that relationship is unknown. -// This covers low-level network interactions (e.g. packet tracing) where you -// don't know if there was a connection or which side initiated it. This also -// covers unidirectional UDP flows and peer-to-peer communication where the -// "user-facing" surface of the protocol / API doesn't expose a clear notion of -// client and server. -const ( - // SourceAddressKey is the attribute Key conforming to the "source.address" - // semantic conventions. It represents the source address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix - // domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the destination side, and when communicating - // through an intermediary, `source.address` SHOULD represent the source - // address behind any intermediaries, for example proxies, if it's - // available. - SourceAddressKey = attribute.Key("source.address") - - // SourcePortKey is the attribute Key conforming to the "source.port" - // semantic conventions. It represents the source port number - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3389, 2888 - SourcePortKey = attribute.Key("source.port") -) - -// SourceAddress returns an attribute KeyValue conforming to the -// "source.address" semantic conventions. It represents the source address - -// domain name if available without reverse DNS lookup; otherwise, IP address -// or Unix domain socket name. -func SourceAddress(val string) attribute.KeyValue { - return SourceAddressKey.String(val) -} - -// SourcePort returns an attribute KeyValue conforming to the "source.port" -// semantic conventions. It represents the source port number -func SourcePort(val int) attribute.KeyValue { - return SourcePortKey.Int(val) -} - -// Semantic convention attributes in the TLS namespace. -const ( - // TLSCipherKey is the attribute Key conforming to the "tls.cipher" - // semantic conventions. 
It represents the string indicating the - // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) - // used during the current connection. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA', - // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' - // Note: The values allowed for `tls.cipher` MUST be one of the - // `Descriptions` of the [registered TLS Cipher - // Suits](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4). - TLSCipherKey = attribute.Key("tls.cipher") - - // TLSClientCertificateKey is the attribute Key conforming to the - // "tls.client.certificate" semantic conventions. It represents the - // pEM-encoded stand-alone certificate offered by the client. This is - // usually mutually-exclusive of `client.certificate_chain` since this - // value also exists in that list. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...' - TLSClientCertificateKey = attribute.Key("tls.client.certificate") - - // TLSClientCertificateChainKey is the attribute Key conforming to the - // "tls.client.certificate_chain" semantic conventions. It represents the - // array of PEM-encoded certificates that make up the certificate chain - // offered by the client. This is usually mutually-exclusive of - // `client.certificate` since that value should be the first certificate in - // the chain. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...', 'MI...' - TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain") - - // TLSClientHashMd5Key is the attribute Key conforming to the - // "tls.client.hash.md5" semantic conventions. It represents the - // certificate fingerprint using the MD5 digest of DER-encoded version of - // certificate offered by the client. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' - TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5") - - // TLSClientHashSha1Key is the attribute Key conforming to the - // "tls.client.hash.sha1" semantic conventions. It represents the - // certificate fingerprint using the SHA1 digest of DER-encoded version of - // certificate offered by the client. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' - TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") - - // TLSClientHashSha256Key is the attribute Key conforming to the - // "tls.client.hash.sha256" semantic conventions. It represents the - // certificate fingerprint using the SHA256 digest of DER-encoded version - // of certificate offered by the client. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' - TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") - - // TLSClientIssuerKey is the attribute Key conforming to the - // "tls.client.issuer" semantic conventions. 
It represents the - // distinguished name of - // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) - // of the issuer of the x.509 certificate presented by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, - // DC=com' - TLSClientIssuerKey = attribute.Key("tls.client.issuer") - - // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" - // semantic conventions. It represents a hash that identifies clients based - // on how they perform an SSL/TLS handshake. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'd4e5b18d6b55c71272893221c96ba240' - TLSClientJa3Key = attribute.Key("tls.client.ja3") - - // TLSClientNotAfterKey is the attribute Key conforming to the - // "tls.client.not_after" semantic conventions. It represents the date/Time - // indicating when client certificate is no longer considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2021-01-01T00:00:00.000Z' - TLSClientNotAfterKey = attribute.Key("tls.client.not_after") - - // TLSClientNotBeforeKey is the attribute Key conforming to the - // "tls.client.not_before" semantic conventions. It represents the - // date/Time indicating when client certificate is first considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1970-01-01T00:00:00.000Z' - TLSClientNotBeforeKey = attribute.Key("tls.client.not_before") - - // TLSClientServerNameKey is the attribute Key conforming to the - // "tls.client.server_name" semantic conventions. It represents the also - // called an SNI, this tells the server which hostname to which the client - // is attempting to connect to. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry.io' - TLSClientServerNameKey = attribute.Key("tls.client.server_name") - - // TLSClientSubjectKey is the attribute Key conforming to the - // "tls.client.subject" semantic conventions. It represents the - // distinguished name of subject of the x.509 certificate presented by the - // client. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com' - TLSClientSubjectKey = attribute.Key("tls.client.subject") - - // TLSClientSupportedCiphersKey is the attribute Key conforming to the - // "tls.client.supported_ciphers" semantic conventions. It represents the - // array of ciphers offered by the client during the client hello. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."' - TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers") - - // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic - // conventions. It represents the string indicating the curve used for the - // given cipher, when applicable - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'secp256r1' - TLSCurveKey = attribute.Key("tls.curve") - - // TLSEstablishedKey is the attribute Key conforming to the - // "tls.established" semantic conventions. 
It represents the boolean flag - // indicating if the TLS negotiation was successful and transitioned to an - // encrypted tunnel. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Examples: True - TLSEstablishedKey = attribute.Key("tls.established") - - // TLSNextProtocolKey is the attribute Key conforming to the - // "tls.next_protocol" semantic conventions. It represents the string - // indicating the protocol being tunneled. Per the values in the [IANA - // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), - // this string should be lower case. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'http/1.1' - TLSNextProtocolKey = attribute.Key("tls.next_protocol") - - // TLSProtocolNameKey is the attribute Key conforming to the - // "tls.protocol.name" semantic conventions. It represents the normalized - // lowercase protocol name parsed from original string of the negotiated - // [SSL/TLS protocol - // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - TLSProtocolNameKey = attribute.Key("tls.protocol.name") - - // TLSProtocolVersionKey is the attribute Key conforming to the - // "tls.protocol.version" semantic conventions. It represents the numeric - // part of the version parsed from the original string of the negotiated - // [SSL/TLS protocol - // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.2', '3' - TLSProtocolVersionKey = attribute.Key("tls.protocol.version") - - // TLSResumedKey is the attribute Key conforming to the "tls.resumed" - // semantic conventions. It represents the boolean flag indicating if this - // TLS connection was resumed from an existing TLS negotiation. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Examples: True - TLSResumedKey = attribute.Key("tls.resumed") - - // TLSServerCertificateKey is the attribute Key conforming to the - // "tls.server.certificate" semantic conventions. It represents the - // pEM-encoded stand-alone certificate offered by the server. This is - // usually mutually-exclusive of `server.certificate_chain` since this - // value also exists in that list. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...' - TLSServerCertificateKey = attribute.Key("tls.server.certificate") - - // TLSServerCertificateChainKey is the attribute Key conforming to the - // "tls.server.certificate_chain" semantic conventions. It represents the - // array of PEM-encoded certificates that make up the certificate chain - // offered by the server. This is usually mutually-exclusive of - // `server.certificate` since that value should be the first certificate in - // the chain. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...', 'MI...' - TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain") - - // TLSServerHashMd5Key is the attribute Key conforming to the - // "tls.server.hash.md5" semantic conventions. It represents the - // certificate fingerprint using the MD5 digest of DER-encoded version of - // certificate offered by the server. 
For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' - TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5") - - // TLSServerHashSha1Key is the attribute Key conforming to the - // "tls.server.hash.sha1" semantic conventions. It represents the - // certificate fingerprint using the SHA1 digest of DER-encoded version of - // certificate offered by the server. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' - TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1") - - // TLSServerHashSha256Key is the attribute Key conforming to the - // "tls.server.hash.sha256" semantic conventions. It represents the - // certificate fingerprint using the SHA256 digest of DER-encoded version - // of certificate offered by the server. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' - TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256") - - // TLSServerIssuerKey is the attribute Key conforming to the - // "tls.server.issuer" semantic conventions. It represents the - // distinguished name of - // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) - // of the issuer of the x.509 certificate presented by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, - // DC=com' - TLSServerIssuerKey = attribute.Key("tls.server.issuer") - - // TLSServerJa3sKey is the attribute Key conforming to the - // "tls.server.ja3s" semantic conventions. It represents a hash that - // identifies servers based on how they perform an SSL/TLS handshake. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'd4e5b18d6b55c71272893221c96ba240' - TLSServerJa3sKey = attribute.Key("tls.server.ja3s") - - // TLSServerNotAfterKey is the attribute Key conforming to the - // "tls.server.not_after" semantic conventions. It represents the date/Time - // indicating when server certificate is no longer considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2021-01-01T00:00:00.000Z' - TLSServerNotAfterKey = attribute.Key("tls.server.not_after") - - // TLSServerNotBeforeKey is the attribute Key conforming to the - // "tls.server.not_before" semantic conventions. It represents the - // date/Time indicating when server certificate is first considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1970-01-01T00:00:00.000Z' - TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") - - // TLSServerSubjectKey is the attribute Key conforming to the - // "tls.server.subject" semantic conventions. It represents the - // distinguished name of subject of the x.509 certificate presented by the - // server. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com' - TLSServerSubjectKey = attribute.Key("tls.server.subject") -) - -var ( - // ssl - TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") - // tls - TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") -) - -// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" -// semantic conventions. It represents the string indicating the -// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used -// during the current connection. -func TLSCipher(val string) attribute.KeyValue { - return TLSCipherKey.String(val) -} - -// TLSClientCertificate returns an attribute KeyValue conforming to the -// "tls.client.certificate" semantic conventions. It represents the pEM-encoded -// stand-alone certificate offered by the client. This is usually -// mutually-exclusive of `client.certificate_chain` since this value also -// exists in that list. -func TLSClientCertificate(val string) attribute.KeyValue { - return TLSClientCertificateKey.String(val) -} - -// TLSClientCertificateChain returns an attribute KeyValue conforming to the -// "tls.client.certificate_chain" semantic conventions. It represents the array -// of PEM-encoded certificates that make up the certificate chain offered by -// the client. This is usually mutually-exclusive of `client.certificate` since -// that value should be the first certificate in the chain. -func TLSClientCertificateChain(val ...string) attribute.KeyValue { - return TLSClientCertificateChainKey.StringSlice(val) -} - -// TLSClientHashMd5 returns an attribute KeyValue conforming to the -// "tls.client.hash.md5" semantic conventions. It represents the certificate -// fingerprint using the MD5 digest of DER-encoded version of certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashMd5(val string) attribute.KeyValue { - return TLSClientHashMd5Key.String(val) -} - -// TLSClientHashSha1 returns an attribute KeyValue conforming to the -// "tls.client.hash.sha1" semantic conventions. It represents the certificate -// fingerprint using the SHA1 digest of DER-encoded version of certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashSha1(val string) attribute.KeyValue { - return TLSClientHashSha1Key.String(val) -} - -// TLSClientHashSha256 returns an attribute KeyValue conforming to the -// "tls.client.hash.sha256" semantic conventions. It represents the certificate -// fingerprint using the SHA256 digest of DER-encoded version of certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashSha256(val string) attribute.KeyValue { - return TLSClientHashSha256Key.String(val) -} - -// TLSClientIssuer returns an attribute KeyValue conforming to the -// "tls.client.issuer" semantic conventions. It represents the distinguished -// name of -// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of -// the issuer of the x.509 certificate presented by the client. -func TLSClientIssuer(val string) attribute.KeyValue { - return TLSClientIssuerKey.String(val) -} - -// TLSClientJa3 returns an attribute KeyValue conforming to the -// "tls.client.ja3" semantic conventions. 
It represents a hash that identifies -// clients based on how they perform an SSL/TLS handshake. -func TLSClientJa3(val string) attribute.KeyValue { - return TLSClientJa3Key.String(val) -} - -// TLSClientNotAfter returns an attribute KeyValue conforming to the -// "tls.client.not_after" semantic conventions. It represents the date/Time -// indicating when client certificate is no longer considered valid. -func TLSClientNotAfter(val string) attribute.KeyValue { - return TLSClientNotAfterKey.String(val) -} - -// TLSClientNotBefore returns an attribute KeyValue conforming to the -// "tls.client.not_before" semantic conventions. It represents the date/Time -// indicating when client certificate is first considered valid. -func TLSClientNotBefore(val string) attribute.KeyValue { - return TLSClientNotBeforeKey.String(val) -} - -// TLSClientServerName returns an attribute KeyValue conforming to the -// "tls.client.server_name" semantic conventions. It represents the also called -// an SNI, this tells the server which hostname to which the client is -// attempting to connect to. -func TLSClientServerName(val string) attribute.KeyValue { - return TLSClientServerNameKey.String(val) -} - -// TLSClientSubject returns an attribute KeyValue conforming to the -// "tls.client.subject" semantic conventions. It represents the distinguished -// name of subject of the x.509 certificate presented by the client. -func TLSClientSubject(val string) attribute.KeyValue { - return TLSClientSubjectKey.String(val) -} - -// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the -// "tls.client.supported_ciphers" semantic conventions. It represents the array -// of ciphers offered by the client during the client hello. -func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { - return TLSClientSupportedCiphersKey.StringSlice(val) -} - -// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" -// semantic conventions. It represents the string indicating the curve used for -// the given cipher, when applicable -func TLSCurve(val string) attribute.KeyValue { - return TLSCurveKey.String(val) -} - -// TLSEstablished returns an attribute KeyValue conforming to the -// "tls.established" semantic conventions. It represents the boolean flag -// indicating if the TLS negotiation was successful and transitioned to an -// encrypted tunnel. -func TLSEstablished(val bool) attribute.KeyValue { - return TLSEstablishedKey.Bool(val) -} - -// TLSNextProtocol returns an attribute KeyValue conforming to the -// "tls.next_protocol" semantic conventions. It represents the string -// indicating the protocol being tunneled. Per the values in the [IANA -// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), -// this string should be lower case. -func TLSNextProtocol(val string) attribute.KeyValue { - return TLSNextProtocolKey.String(val) -} - -// TLSProtocolVersion returns an attribute KeyValue conforming to the -// "tls.protocol.version" semantic conventions. It represents the numeric part -// of the version parsed from the original string of the negotiated [SSL/TLS -// protocol -// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) -func TLSProtocolVersion(val string) attribute.KeyValue { - return TLSProtocolVersionKey.String(val) -} - -// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed" -// semantic conventions. 
It represents the boolean flag indicating if this TLS -// connection was resumed from an existing TLS negotiation. -func TLSResumed(val bool) attribute.KeyValue { - return TLSResumedKey.Bool(val) -} - -// TLSServerCertificate returns an attribute KeyValue conforming to the -// "tls.server.certificate" semantic conventions. It represents the pEM-encoded -// stand-alone certificate offered by the server. This is usually -// mutually-exclusive of `server.certificate_chain` since this value also -// exists in that list. -func TLSServerCertificate(val string) attribute.KeyValue { - return TLSServerCertificateKey.String(val) -} - -// TLSServerCertificateChain returns an attribute KeyValue conforming to the -// "tls.server.certificate_chain" semantic conventions. It represents the array -// of PEM-encoded certificates that make up the certificate chain offered by -// the server. This is usually mutually-exclusive of `server.certificate` since -// that value should be the first certificate in the chain. -func TLSServerCertificateChain(val ...string) attribute.KeyValue { - return TLSServerCertificateChainKey.StringSlice(val) -} - -// TLSServerHashMd5 returns an attribute KeyValue conforming to the -// "tls.server.hash.md5" semantic conventions. It represents the certificate -// fingerprint using the MD5 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashMd5(val string) attribute.KeyValue { - return TLSServerHashMd5Key.String(val) -} - -// TLSServerHashSha1 returns an attribute KeyValue conforming to the -// "tls.server.hash.sha1" semantic conventions. It represents the certificate -// fingerprint using the SHA1 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashSha1(val string) attribute.KeyValue { - return TLSServerHashSha1Key.String(val) -} - -// TLSServerHashSha256 returns an attribute KeyValue conforming to the -// "tls.server.hash.sha256" semantic conventions. It represents the certificate -// fingerprint using the SHA256 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashSha256(val string) attribute.KeyValue { - return TLSServerHashSha256Key.String(val) -} - -// TLSServerIssuer returns an attribute KeyValue conforming to the -// "tls.server.issuer" semantic conventions. It represents the distinguished -// name of -// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of -// the issuer of the x.509 certificate presented by the client. -func TLSServerIssuer(val string) attribute.KeyValue { - return TLSServerIssuerKey.String(val) -} - -// TLSServerJa3s returns an attribute KeyValue conforming to the -// "tls.server.ja3s" semantic conventions. It represents a hash that identifies -// servers based on how they perform an SSL/TLS handshake. -func TLSServerJa3s(val string) attribute.KeyValue { - return TLSServerJa3sKey.String(val) -} - -// TLSServerNotAfter returns an attribute KeyValue conforming to the -// "tls.server.not_after" semantic conventions. It represents the date/Time -// indicating when server certificate is no longer considered valid. 
-func TLSServerNotAfter(val string) attribute.KeyValue { - return TLSServerNotAfterKey.String(val) -} - -// TLSServerNotBefore returns an attribute KeyValue conforming to the -// "tls.server.not_before" semantic conventions. It represents the date/Time -// indicating when server certificate is first considered valid. -func TLSServerNotBefore(val string) attribute.KeyValue { - return TLSServerNotBeforeKey.String(val) -} - -// TLSServerSubject returns an attribute KeyValue conforming to the -// "tls.server.subject" semantic conventions. It represents the distinguished -// name of subject of the x.509 certificate presented by the server. -func TLSServerSubject(val string) attribute.KeyValue { - return TLSServerSubjectKey.String(val) -} - -// Attributes describing URL. -const ( - // URLFragmentKey is the attribute Key conforming to the "url.fragment" - // semantic conventions. It represents the [URI - // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'SemConv' - URLFragmentKey = attribute.Key("url.fragment") - - // URLFullKey is the attribute Key conforming to the "url.full" semantic - // conventions. It represents the absolute URL describing a network - // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', - // '//localhost' - // Note: For network calls, URL usually has - // `scheme://host[:port][path][?query][#fragment]` format, where the - // fragment is not transmitted over HTTP, but if it is known, it SHOULD be - // included nevertheless. - // `url.full` MUST NOT contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. In such case username and - // password SHOULD be redacted and attribute's value SHOULD be - // `https://REDACTED:REDACTED@www.example.com/`. - // `url.full` SHOULD capture the absolute URL when it is available (or can - // be reconstructed) and SHOULD NOT be validated or modified except for - // sanitizing purposes. - URLFullKey = attribute.Key("url.full") - - // URLPathKey is the attribute Key conforming to the "url.path" semantic - // conventions. It represents the [URI - // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/search' - URLPathKey = attribute.Key("url.path") - - // URLQueryKey is the attribute Key conforming to the "url.query" semantic - // conventions. It represents the [URI - // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'q=OpenTelemetry' - // Note: Sensitive content provided in query string SHOULD be scrubbed when - // instrumentations can identify it. - URLQueryKey = attribute.Key("url.query") - - // URLSchemeKey is the attribute Key conforming to the "url.scheme" - // semantic conventions. It represents the [URI - // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component - // identifying the used protocol. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'https', 'ftp', 'telnet' - URLSchemeKey = attribute.Key("url.scheme") -) - -// URLFragment returns an attribute KeyValue conforming to the -// "url.fragment" semantic conventions. 
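The tls.* helpers above only build attribute.KeyValue pairs; how a connection's state is mapped onto them is left to callers. A sketch of one plausible mapping from crypto/tls.ConnectionState, assuming the v1.24.0 import path; the helper name and the version handling shown are illustrative, not part of the vendored package.

```go
package main

import (
	"crypto/tls"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// tlsAttributes maps a finished handshake onto the tls.* attribute helpers.
// The helper itself and the version handling are illustrative.
func tlsAttributes(state tls.ConnectionState) []attribute.KeyValue {
	attrs := []attribute.KeyValue{
		semconv.TLSEstablished(state.HandshakeComplete),
		semconv.TLSResumed(state.DidResume),
		// CipherSuiteName returns the IANA description, e.g.
		// "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256".
		semconv.TLSCipher(tls.CipherSuiteName(state.CipherSuite)),
	}
	if state.NegotiatedProtocol != "" {
		// ALPN protocol, e.g. "h2" or "http/1.1".
		attrs = append(attrs, semconv.TLSNextProtocol(state.NegotiatedProtocol))
	}
	if state.Version == tls.VersionTLS13 {
		attrs = append(attrs, semconv.TLSProtocolVersion("1.3"))
	}
	return attrs
}

func main() {
	_ = tlsAttributes(tls.ConnectionState{HandshakeComplete: true})
}
```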
It represents the [URI -// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component -func URLFragment(val string) attribute.KeyValue { - return URLFragmentKey.String(val) -} - -// URLFull returns an attribute KeyValue conforming to the "url.full" -// semantic conventions. It represents the absolute URL describing a network -// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) -func URLFull(val string) attribute.KeyValue { - return URLFullKey.String(val) -} - -// URLPath returns an attribute KeyValue conforming to the "url.path" -// semantic conventions. It represents the [URI -// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component -func URLPath(val string) attribute.KeyValue { - return URLPathKey.String(val) -} - -// URLQuery returns an attribute KeyValue conforming to the "url.query" -// semantic conventions. It represents the [URI -// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component -func URLQuery(val string) attribute.KeyValue { - return URLQueryKey.String(val) -} - -// URLScheme returns an attribute KeyValue conforming to the "url.scheme" -// semantic conventions. It represents the [URI -// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component -// identifying the used protocol. -func URLScheme(val string) attribute.KeyValue { - return URLSchemeKey.String(val) -} - -// Describes user-agent attributes. -const ( - // UserAgentOriginalKey is the attribute Key conforming to the - // "user_agent.original" semantic conventions. It represents the value of - // the [HTTP - // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) - // header sent by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU - // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) - // Version/14.1.2 Mobile/15E148 Safari/604.1' - UserAgentOriginalKey = attribute.Key("user_agent.original") -) - -// UserAgentOriginal returns an attribute KeyValue conforming to the -// "user_agent.original" semantic conventions. It represents the value of the -// [HTTP -// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) -// header sent by the client. -func UserAgentOriginal(val string) attribute.KeyValue { - return UserAgentOriginalKey.String(val) -} - -// Session is defined as the period of time encompassing all activities -// performed by the application and the actions executed by the end user. -// Consequently, a Session is represented as a collection of Logs, Events, and -// Spans emitted by the Client Application throughout the Session's duration. -// Each Session is assigned a unique identifier, which is included as an -// attribute in the Logs, Events, and Spans generated during the Session's -// lifecycle. -// When a session reaches end of life, typically due to user inactivity or -// session timeout, a new session identifier will be assigned. The previous -// session identifier may be provided by the instrumentation so that telemetry -// backends can link the two sessions. -const ( - // SessionIDKey is the attribute Key conforming to the "session.id" - // semantic conventions. It represents a unique id to identify a session. 
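The url.*, server.address and user_agent.original constructors above are commonly filled from an outgoing *http.Request. A minimal sketch of that, assuming the v1.24.0 import path; the helper name is illustrative and the example URL is the one given in the attribute docs.

```go
package main

import (
	"net/http"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// urlAttributes builds url.*, server.address and user_agent.original
// attributes for an outgoing request; the helper is illustrative only.
func urlAttributes(req *http.Request) []attribute.KeyValue {
	attrs := []attribute.KeyValue{
		// url.full must not carry credentials; assume req.URL has none here.
		semconv.URLFull(req.URL.String()),
		semconv.URLScheme(req.URL.Scheme),
		semconv.URLPath(req.URL.Path),
		semconv.ServerAddress(req.URL.Hostname()),
	}
	if q := req.URL.RawQuery; q != "" {
		attrs = append(attrs, semconv.URLQuery(q))
	}
	if ua := req.UserAgent(); ua != "" {
		attrs = append(attrs, semconv.UserAgentOriginal(ua))
	}
	return attrs
}

func main() {
	req, _ := http.NewRequest(http.MethodGet,
		"https://www.foo.bar/search?q=OpenTelemetry", nil)
	_ = urlAttributes(req)
}
```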
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '00112233-4455-6677-8899-aabbccddeeff' - SessionIDKey = attribute.Key("session.id") - - // SessionPreviousIDKey is the attribute Key conforming to the - // "session.previous_id" semantic conventions. It represents the previous - // `session.id` for this user, when known. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '00112233-4455-6677-8899-aabbccddeeff' - SessionPreviousIDKey = attribute.Key("session.previous_id") -) - -// SessionID returns an attribute KeyValue conforming to the "session.id" -// semantic conventions. It represents a unique id to identify a session. -func SessionID(val string) attribute.KeyValue { - return SessionIDKey.String(val) -} - -// SessionPreviousID returns an attribute KeyValue conforming to the -// "session.previous_id" semantic conventions. It represents the previous -// `session.id` for this user, when known. -func SessionPreviousID(val string) attribute.KeyValue { - return SessionPreviousIDKey.String(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go deleted file mode 100644 index 6c019aafc3e..00000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" - -import "go.opentelemetry.io/otel/attribute" - -// This event represents an occurrence of a lifecycle transition on the iOS -// platform. -const ( - // IosStateKey is the attribute Key conforming to the "ios.state" semantic - // conventions. It represents the this attribute represents the state the - // application has transitioned into at the occurrence of the event. - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - // Note: The iOS lifecycle states are defined in the [UIApplicationDelegate - // documentation](https://developer.apple.com/documentation/uikit/uiapplicationdelegate#1656902), - // and from which the `OS terminology` column values are derived. - IosStateKey = attribute.Key("ios.state") -) - -var ( - // The app has become `active`. Associated with UIKit notification `applicationDidBecomeActive` - IosStateActive = IosStateKey.String("active") - // The app is now `inactive`. Associated with UIKit notification `applicationWillResignActive` - IosStateInactive = IosStateKey.String("inactive") - // The app is now in the background. This value is associated with UIKit notification `applicationDidEnterBackground` - IosStateBackground = IosStateKey.String("background") - // The app is now in the foreground. This value is associated with UIKit notification `applicationWillEnterForeground` - IosStateForeground = IosStateKey.String("foreground") - // The app is about to terminate. Associated with UIKit notification `applicationWillTerminate` - IosStateTerminate = IosStateKey.String("terminate") -) - -// This event represents an occurrence of a lifecycle transition on the Android -// platform. -const ( - // AndroidStateKey is the attribute Key conforming to the "android.state" - // semantic conventions. It represents the this attribute represents the - // state the application has transitioned into at the occurrence of the - // event. 
- // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - // Note: The Android lifecycle states are defined in [Activity lifecycle - // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc), - // and from which the `OS identifiers` are derived. - AndroidStateKey = attribute.Key("android.state") -) - -var ( - // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time - AndroidStateCreated = AndroidStateKey.String("created") - // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state - AndroidStateBackground = AndroidStateKey.String("background") - // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states - AndroidStateForeground = AndroidStateKey.String("foreground") -) - -// This semantic convention defines the attributes used to represent a feature -// flag evaluation as an event. -const ( - // FeatureFlagKeyKey is the attribute Key conforming to the - // "feature_flag.key" semantic conventions. It represents the unique - // identifier of the feature flag. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'logo-color' - FeatureFlagKeyKey = attribute.Key("feature_flag.key") - - // FeatureFlagProviderNameKey is the attribute Key conforming to the - // "feature_flag.provider_name" semantic conventions. It represents the - // name of the service provider that performs the flag evaluation. - // - // Type: string - // RequirementLevel: Recommended - // Stability: experimental - // Examples: 'Flag Manager' - FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") - - // FeatureFlagVariantKey is the attribute Key conforming to the - // "feature_flag.variant" semantic conventions. It represents the sHOULD be - // a semantic identifier for a value. If one is unavailable, a stringified - // version of the value can be used. - // - // Type: string - // RequirementLevel: Recommended - // Stability: experimental - // Examples: 'red', 'true', 'on' - // Note: A semantic identifier, commonly referred to as a variant, provides - // a means - // for referring to a value without including the value itself. This can - // provide additional context for understanding the meaning behind a value. - // For example, the variant `red` maybe be used for the value `#c05543`. - // - // A stringified version of the value can be used in situations where a - // semantic identifier is unavailable. String representation of the value - // should be determined by the implementer. - FeatureFlagVariantKey = attribute.Key("feature_flag.variant") -) - -// FeatureFlagKey returns an attribute KeyValue conforming to the -// "feature_flag.key" semantic conventions. It represents the unique identifier -// of the feature flag. -func FeatureFlagKey(val string) attribute.KeyValue { - return FeatureFlagKeyKey.String(val) -} - -// FeatureFlagProviderName returns an attribute KeyValue conforming to the -// "feature_flag.provider_name" semantic conventions. It represents the name of -// the service provider that performs the flag evaluation. 
-func FeatureFlagProviderName(val string) attribute.KeyValue { - return FeatureFlagProviderNameKey.String(val) -} - -// FeatureFlagVariant returns an attribute KeyValue conforming to the -// "feature_flag.variant" semantic conventions. It represents the sHOULD be a -// semantic identifier for a value. If one is unavailable, a stringified -// version of the value can be used. -func FeatureFlagVariant(val string) attribute.KeyValue { - return FeatureFlagVariantKey.String(val) -} - -// RPC received/sent message. -const ( - // MessageCompressedSizeKey is the attribute Key conforming to the - // "message.compressed_size" semantic conventions. It represents the - // compressed size of the message in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - MessageCompressedSizeKey = attribute.Key("message.compressed_size") - - // MessageIDKey is the attribute Key conforming to the "message.id" - // semantic conventions. It represents the mUST be calculated as two - // different counters starting from `1` one for sent messages and one for - // received message. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Note: This way we guarantee that the values will be consistent between - // different implementations. - MessageIDKey = attribute.Key("message.id") - - // MessageTypeKey is the attribute Key conforming to the "message.type" - // semantic conventions. It represents the whether this is a received or - // sent message. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessageTypeKey = attribute.Key("message.type") - - // MessageUncompressedSizeKey is the attribute Key conforming to the - // "message.uncompressed_size" semantic conventions. It represents the - // uncompressed size of the message in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") -) - -var ( - // sent - MessageTypeSent = MessageTypeKey.String("SENT") - // received - MessageTypeReceived = MessageTypeKey.String("RECEIVED") -) - -// MessageCompressedSize returns an attribute KeyValue conforming to the -// "message.compressed_size" semantic conventions. It represents the compressed -// size of the message in bytes. -func MessageCompressedSize(val int) attribute.KeyValue { - return MessageCompressedSizeKey.Int(val) -} - -// MessageID returns an attribute KeyValue conforming to the "message.id" -// semantic conventions. It represents the mUST be calculated as two different -// counters starting from `1` one for sent messages and one for received -// message. -func MessageID(val int) attribute.KeyValue { - return MessageIDKey.Int(val) -} - -// MessageUncompressedSize returns an attribute KeyValue conforming to the -// "message.uncompressed_size" semantic conventions. It represents the -// uncompressed size of the message in bytes. -func MessageUncompressedSize(val int) attribute.KeyValue { - return MessageUncompressedSizeKey.Int(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go deleted file mode 100644 index d66bbe9c23d..00000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go +++ /dev/null @@ -1,2545 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. 
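The feature_flag.* and message.* attributes from the event.go file above are meant to be recorded on span events rather than on the span itself. A sketch of that usage, assuming the v1.24.0 import path; the event names ("feature_flag", "message") and all values are illustrative assumptions, not taken from this patch.

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

// recordEvents attaches feature_flag.* and message.* attributes to span
// events; the event names and values here are examples only.
func recordEvents(ctx context.Context) {
	_, span := otel.Tracer("example/events").Start(ctx, "handle-request")
	defer span.End()

	// A feature flag evaluation recorded as an event on the active span.
	span.AddEvent("feature_flag", trace.WithAttributes(
		semconv.FeatureFlagKey("logo-color"),
		semconv.FeatureFlagProviderName("Flag Manager"),
		semconv.FeatureFlagVariant("red"),
	))

	// A sent RPC message; message.id is a per-direction counter starting at 1.
	span.AddEvent("message", trace.WithAttributes(
		semconv.MessageTypeSent,
		semconv.MessageID(1),
		semconv.MessageUncompressedSize(1024),
	))
}

func main() {
	recordEvents(context.Background())
}
```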
- -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" - -import "go.opentelemetry.io/otel/attribute" - -// A cloud environment (e.g. GCP, Azure, AWS). -const ( - // CloudAccountIDKey is the attribute Key conforming to the - // "cloud.account.id" semantic conventions. It represents the cloud account - // ID the resource is assigned to. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '111111111111', 'opentelemetry' - CloudAccountIDKey = attribute.Key("cloud.account.id") - - // CloudAvailabilityZoneKey is the attribute Key conforming to the - // "cloud.availability_zone" semantic conventions. It represents the cloud - // regions often have multiple, isolated locations known as zones to - // increase availability. Availability zone represents the zone where the - // resource is running. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-east-1c' - // Note: Availability zones are called "zones" on Alibaba Cloud and Google - // Cloud. - CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") - - // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" - // semantic conventions. It represents the cloud platform in use. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The prefix of the service SHOULD match the one specified in - // `cloud.provider`. - CloudPlatformKey = attribute.Key("cloud.platform") - - // CloudProviderKey is the attribute Key conforming to the "cloud.provider" - // semantic conventions. It represents the name of the cloud provider. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - CloudProviderKey = attribute.Key("cloud.provider") - - // CloudRegionKey is the attribute Key conforming to the "cloud.region" - // semantic conventions. It represents the geographical region the resource - // is running. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-central1', 'us-east-1' - // Note: Refer to your provider's docs to see the available regions, for - // example [Alibaba Cloud - // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS - // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), - // [Azure - // regions](https://azure.microsoft.com/global-infrastructure/geographies/), - // [Google Cloud regions](https://cloud.google.com/about/locations), or - // [Tencent Cloud - // regions](https://www.tencentcloud.com/document/product/213/6091). - CloudRegionKey = attribute.Key("cloud.region") - - // CloudResourceIDKey is the attribute Key conforming to the - // "cloud.resource_id" semantic conventions. It represents the cloud - // provider-specific native identifier of the monitored cloud resource - // (e.g. 
an - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // on AWS, a [fully qualified resource - // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) - // on Azure, a [full resource - // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) - // on GCP) - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', - // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', - // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' - // Note: On some cloud providers, it may not be possible to determine the - // full ID at startup, - // so it may be necessary to set `cloud.resource_id` as a span attribute - // instead. - // - // The exact value to use for `cloud.resource_id` depends on the cloud - // provider. - // The following well-known definitions MUST be used if you set this - // attribute and they apply: - // - // * **AWS Lambda:** The function - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). - // Take care not to use the "invoked ARN" directly but replace any - // [alias - // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) - // with the resolved function version, as the same runtime instance may - // be invokable with - // multiple different aliases. - // * **GCP:** The [URI of the - // resource](https://cloud.google.com/iam/docs/full-resource-names) - // * **Azure:** The [Fully Qualified Resource - // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id) - // of the invoked function, - // *not* the function app, having the form - // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider. 
- CloudResourceIDKey = attribute.Key("cloud.resource_id") -) - -var ( - // Alibaba Cloud Elastic Compute Service - CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") - // Alibaba Cloud Function Compute - CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") - // Red Hat OpenShift on Alibaba Cloud - CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") - // AWS Elastic Compute Cloud - CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") - // AWS Elastic Container Service - CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") - // AWS Elastic Kubernetes Service - CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") - // AWS Lambda - CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") - // AWS Elastic Beanstalk - CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") - // AWS App Runner - CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") - // Red Hat OpenShift on AWS (ROSA) - CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") - // Azure Virtual Machines - CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") - // Azure Container Instances - CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") - // Azure Kubernetes Service - CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") - // Azure Functions - CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") - // Azure App Service - CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") - // Azure Red Hat OpenShift - CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") - // Google Bare Metal Solution (BMS) - CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") - // Google Cloud Compute Engine (GCE) - CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") - // Google Cloud Run - CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") - // Google Cloud Kubernetes Engine (GKE) - CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") - // Google Cloud Functions (GCF) - CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") - // Google Cloud App Engine (GAE) - CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") - // Red Hat OpenShift on Google Cloud - CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") - // Red Hat OpenShift on IBM Cloud - CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") - // Tencent Cloud Cloud Virtual Machine (CVM) - CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") - // Tencent Cloud Elastic Kubernetes Service (EKS) - CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") - // Tencent Cloud Serverless Cloud Function (SCF) - CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") -) - -var ( - // Alibaba Cloud - CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") - // Amazon Web Services - CloudProviderAWS = CloudProviderKey.String("aws") - // Microsoft Azure - CloudProviderAzure = CloudProviderKey.String("azure") - // Google Cloud Platform - CloudProviderGCP = CloudProviderKey.String("gcp") - // Heroku Platform as a Service - CloudProviderHeroku = CloudProviderKey.String("heroku") - // IBM Cloud - CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") - // Tencent Cloud - 
CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") -) - -// CloudAccountID returns an attribute KeyValue conforming to the -// "cloud.account.id" semantic conventions. It represents the cloud account ID -// the resource is assigned to. -func CloudAccountID(val string) attribute.KeyValue { - return CloudAccountIDKey.String(val) -} - -// CloudAvailabilityZone returns an attribute KeyValue conforming to the -// "cloud.availability_zone" semantic conventions. It represents the cloud -// regions often have multiple, isolated locations known as zones to increase -// availability. Availability zone represents the zone where the resource is -// running. -func CloudAvailabilityZone(val string) attribute.KeyValue { - return CloudAvailabilityZoneKey.String(val) -} - -// CloudRegion returns an attribute KeyValue conforming to the -// "cloud.region" semantic conventions. It represents the geographical region -// the resource is running. -func CloudRegion(val string) attribute.KeyValue { - return CloudRegionKey.String(val) -} - -// CloudResourceID returns an attribute KeyValue conforming to the -// "cloud.resource_id" semantic conventions. It represents the cloud -// provider-specific native identifier of the monitored cloud resource (e.g. an -// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) -// on AWS, a [fully qualified resource -// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on -// Azure, a [full resource -// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) -// on GCP) -func CloudResourceID(val string) attribute.KeyValue { - return CloudResourceIDKey.String(val) -} - -// A container instance. -const ( - // ContainerCommandKey is the attribute Key conforming to the - // "container.command" semantic conventions. It represents the command used - // to run the container (i.e. the command name). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol' - // Note: If using embedded credentials or sensitive data, it is recommended - // to remove them to prevent potential leakage. - ContainerCommandKey = attribute.Key("container.command") - - // ContainerCommandArgsKey is the attribute Key conforming to the - // "container.command_args" semantic conventions. It represents the all the - // command arguments (including the command/executable itself) run by the - // container. [2] - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol, --config, config.yaml' - ContainerCommandArgsKey = attribute.Key("container.command_args") - - // ContainerCommandLineKey is the attribute Key conforming to the - // "container.command_line" semantic conventions. It represents the full - // command run by the container as a single string representing the full - // command. [2] - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol --config config.yaml' - ContainerCommandLineKey = attribute.Key("container.command_line") - - // ContainerIDKey is the attribute Key conforming to the "container.id" - // semantic conventions. It represents the container ID. Usually a UUID, as - // for example used to [identify Docker - // containers](https://docs.docker.com/engine/reference/run/#container-identification). - // The UUID might be abbreviated. 
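A minimal sketch of how the cloud.* constructors and enum values above can be attached from Go, assuming the v1.24.0 semconv import path and an invented ARN; as the `cloud.resource_id` note suggests, the ID is set on a span when it is only known per invocation.

package example

import (
	"context"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // version segment is an assumption
	"go.opentelemetry.io/otel/trace"
)

// cloudResource builds a resource from the enum values (ready-made KeyValues)
// and the string constructors defined above; all values are placeholders.
func cloudResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.CloudProviderAWS,
		semconv.CloudPlatformAWSLambda,
		semconv.CloudRegion("eu-west-1"),
		semconv.CloudAccountID("123456789012"),
	)
}

// recordResourceID sets cloud.resource_id as a span attribute, the route the
// note above recommends when the full ID is only known per invocation.
func recordResourceID(ctx context.Context, resolvedARN string) {
	trace.SpanFromContext(ctx).SetAttributes(semconv.CloudResourceID(resolvedARN))
}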
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'a3bf90e006b2' - ContainerIDKey = attribute.Key("container.id") - - // ContainerImageIDKey is the attribute Key conforming to the - // "container.image.id" semantic conventions. It represents the runtime - // specific image identifier. Usually a hash algorithm followed by a UUID. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' - // Note: Docker defines a sha256 of the image id; `container.image.id` - // corresponds to the `Image` field from the Docker container inspect - // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect) - // endpoint. - // K8S defines a link to the container registry repository with digest - // `"imageID": "registry.azurecr.io - // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`. - // The ID is assigned by the container runtime and can vary in different - // environments. Consider using `oci.manifest.digest` if it is important to - // identify the same image in different environments/runtimes. - ContainerImageIDKey = attribute.Key("container.image.id") - - // ContainerImageNameKey is the attribute Key conforming to the - // "container.image.name" semantic conventions. It represents the name of - // the image the container was built on. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'gcr.io/opentelemetry/operator' - ContainerImageNameKey = attribute.Key("container.image.name") - - // ContainerImageRepoDigestsKey is the attribute Key conforming to the - // "container.image.repo_digests" semantic conventions. It represents the - // repo digests of the container image as provided by the container - // runtime. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb', - // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578' - // Note: - // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect) - // and - // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238) - // report those under the `RepoDigests` field. - ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests") - - // ContainerImageTagsKey is the attribute Key conforming to the - // "container.image.tags" semantic conventions. It represents the container - // image tags. An example can be found in [Docker Image - // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). - // Should be only the `` section of the full name for example from - // `registry.example.com/my-org/my-image:`. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'v1.27.1', '3.5.7-0' - ContainerImageTagsKey = attribute.Key("container.image.tags") - - // ContainerNameKey is the attribute Key conforming to the "container.name" - // semantic conventions. It represents the container name used by container - // runtime.
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-autoconf' - ContainerNameKey = attribute.Key("container.name") - - // ContainerRuntimeKey is the attribute Key conforming to the - // "container.runtime" semantic conventions. It represents the container - // runtime managing this container. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'docker', 'containerd', 'rkt' - ContainerRuntimeKey = attribute.Key("container.runtime") -) - -// ContainerCommand returns an attribute KeyValue conforming to the -// "container.command" semantic conventions. It represents the command used to -// run the container (i.e. the command name). -func ContainerCommand(val string) attribute.KeyValue { - return ContainerCommandKey.String(val) -} - -// ContainerCommandArgs returns an attribute KeyValue conforming to the -// "container.command_args" semantic conventions. It represents the all the -// command arguments (including the command/executable itself) run by the -// container. [2] -func ContainerCommandArgs(val ...string) attribute.KeyValue { - return ContainerCommandArgsKey.StringSlice(val) -} - -// ContainerCommandLine returns an attribute KeyValue conforming to the -// "container.command_line" semantic conventions. It represents the full -// command run by the container as a single string representing the full -// command. [2] -func ContainerCommandLine(val string) attribute.KeyValue { - return ContainerCommandLineKey.String(val) -} - -// ContainerID returns an attribute KeyValue conforming to the -// "container.id" semantic conventions. It represents the container ID. Usually -// a UUID, as for example used to [identify Docker -// containers](https://docs.docker.com/engine/reference/run/#container-identification). -// The UUID might be abbreviated. -func ContainerID(val string) attribute.KeyValue { - return ContainerIDKey.String(val) -} - -// ContainerImageID returns an attribute KeyValue conforming to the -// "container.image.id" semantic conventions. It represents the runtime -// specific image identifier. Usually a hash algorithm followed by a UUID. -func ContainerImageID(val string) attribute.KeyValue { - return ContainerImageIDKey.String(val) -} - -// ContainerImageName returns an attribute KeyValue conforming to the -// "container.image.name" semantic conventions. It represents the name of the -// image the container was built on. -func ContainerImageName(val string) attribute.KeyValue { - return ContainerImageNameKey.String(val) -} - -// ContainerImageRepoDigests returns an attribute KeyValue conforming to the -// "container.image.repo_digests" semantic conventions. It represents the repo -// digests of the container image as provided by the container runtime. -func ContainerImageRepoDigests(val ...string) attribute.KeyValue { - return ContainerImageRepoDigestsKey.StringSlice(val) -} - -// ContainerImageTags returns an attribute KeyValue conforming to the -// "container.image.tags" semantic conventions. It represents the container -// image tags. An example can be found in [Docker Image -// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). -// Should be only the `` section of the full name for example from -// `registry.example.com/my-org/my-image:`. 
-func ContainerImageTags(val ...string) attribute.KeyValue { - return ContainerImageTagsKey.StringSlice(val) -} - -// ContainerName returns an attribute KeyValue conforming to the -// "container.name" semantic conventions. It represents the container name used -// by container runtime. -func ContainerName(val string) attribute.KeyValue { - return ContainerNameKey.String(val) -} - -// ContainerRuntime returns an attribute KeyValue conforming to the -// "container.runtime" semantic conventions. It represents the container -// runtime managing this container. -func ContainerRuntime(val string) attribute.KeyValue { - return ContainerRuntimeKey.String(val) -} - -// Describes device attributes. -const ( - // DeviceIDKey is the attribute Key conforming to the "device.id" semantic - // conventions. It represents a unique identifier representing the device - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' - // Note: The device identifier MUST only be defined using the values - // outlined below. This value is not an advertising identifier and MUST NOT - // be used as such. On iOS (Swift or Objective-C), this value MUST be equal - // to the [vendor - // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). - // On Android (Java or Kotlin), this value MUST be equal to the Firebase - // Installation ID or a globally unique UUID which is persisted across - // sessions in your application. More information can be found - // [here](https://developer.android.com/training/articles/user-data-ids) on - // best practices and exact implementation details. Caution should be taken - // when storing personal data or anything which can identify a user. GDPR - // and data protection laws may apply, ensure you do your own due - // diligence. - DeviceIDKey = attribute.Key("device.id") - - // DeviceManufacturerKey is the attribute Key conforming to the - // "device.manufacturer" semantic conventions. It represents the name of - // the device manufacturer - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Apple', 'Samsung' - // Note: The Android OS provides this field via - // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). - // iOS apps SHOULD hardcode the value `Apple`. - DeviceManufacturerKey = attribute.Key("device.manufacturer") - - // DeviceModelIdentifierKey is the attribute Key conforming to the - // "device.model.identifier" semantic conventions. It represents the model - // identifier for the device - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iPhone3,4', 'SM-G920F' - // Note: It's recommended this value represents a machine-readable version - // of the model identifier rather than the market or consumer-friendly name - // of the device. - DeviceModelIdentifierKey = attribute.Key("device.model.identifier") - - // DeviceModelNameKey is the attribute Key conforming to the - // "device.model.name" semantic conventions. It represents the marketing - // name for the device model - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' - // Note: It's recommended this value represents a human-readable version of - // the device model rather than a machine-readable alternative. 
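The container.* helpers above come in single-string and string-slice flavours; a small sketch with placeholder values, again assuming the v1.24.0 import path.

package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // version segment is an assumption
)

// containerResource uses invented values purely to show the two shapes:
// plain string constructors and variadic string-slice constructors.
func containerResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ContainerName("keda-operator"),                       // container.name
		semconv.ContainerID("a3bf90e006b2"),                          // container.id (possibly abbreviated)
		semconv.ContainerImageName("ghcr.io/kedacore/keda"),          // container.image.name
		semconv.ContainerImageTags("2.15.1"),                         // variadic -> string[] attribute
		semconv.ContainerCommandArgs("keda", "--zap-log-level=info"), // string[] as well
	)
}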
- DeviceModelNameKey = attribute.Key("device.model.name") -) - -// DeviceID returns an attribute KeyValue conforming to the "device.id" -// semantic conventions. It represents a unique identifier representing the -// device -func DeviceID(val string) attribute.KeyValue { - return DeviceIDKey.String(val) -} - -// DeviceManufacturer returns an attribute KeyValue conforming to the -// "device.manufacturer" semantic conventions. It represents the name of the -// device manufacturer -func DeviceManufacturer(val string) attribute.KeyValue { - return DeviceManufacturerKey.String(val) -} - -// DeviceModelIdentifier returns an attribute KeyValue conforming to the -// "device.model.identifier" semantic conventions. It represents the model -// identifier for the device -func DeviceModelIdentifier(val string) attribute.KeyValue { - return DeviceModelIdentifierKey.String(val) -} - -// DeviceModelName returns an attribute KeyValue conforming to the -// "device.model.name" semantic conventions. It represents the marketing name -// for the device model -func DeviceModelName(val string) attribute.KeyValue { - return DeviceModelNameKey.String(val) -} - -// A host is defined as a computing instance. For example, physical servers, -// virtual machines, switches or disk array. -const ( - // HostArchKey is the attribute Key conforming to the "host.arch" semantic - // conventions. It represents the CPU architecture the host system is - // running on. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - HostArchKey = attribute.Key("host.arch") - - // HostCPUCacheL2SizeKey is the attribute Key conforming to the - // "host.cpu.cache.l2.size" semantic conventions. It represents the amount - // of level 2 memory cache available to the processor (in Bytes). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 12288000 - HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") - - // HostCPUFamilyKey is the attribute Key conforming to the - // "host.cpu.family" semantic conventions. It represents the family or - // generation of the CPU. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '6', 'PA-RISC 1.1e' - HostCPUFamilyKey = attribute.Key("host.cpu.family") - - // HostCPUModelIDKey is the attribute Key conforming to the - // "host.cpu.model.id" semantic conventions. It represents the model - // identifier. It provides more granular information about the CPU, - // distinguishing it from other CPUs within the same family. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '6', '9000/778/B180L' - HostCPUModelIDKey = attribute.Key("host.cpu.model.id") - - // HostCPUModelNameKey is the attribute Key conforming to the - // "host.cpu.model.name" semantic conventions. It represents the model - // designation of the processor. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz' - HostCPUModelNameKey = attribute.Key("host.cpu.model.name") - - // HostCPUSteppingKey is the attribute Key conforming to the - // "host.cpu.stepping" semantic conventions. It represents the stepping or - // core revisions. 
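For the device.* group, a short sketch that simply reuses the documentation examples above as values (they are illustrations, not real device data).

package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // version segment is an assumption
)

// deviceAttrs echoes the example values from the comments above.
func deviceAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.DeviceID("2ab2916d-a51f-4ac8-80ee-45ac31a28092"),
		semconv.DeviceManufacturer("Samsung"),
		semconv.DeviceModelIdentifier("SM-G920F"),
		semconv.DeviceModelName("Samsung Galaxy S6"),
	}
}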
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1 - HostCPUSteppingKey = attribute.Key("host.cpu.stepping") - - // HostCPUVendorIDKey is the attribute Key conforming to the - // "host.cpu.vendor.id" semantic conventions. It represents the processor - // manufacturer identifier. A maximum 12-character string. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'GenuineIntel' - // Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor - // ID string in EBX, EDX and ECX registers. Writing these to memory in this - // order results in a 12-character string. - HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id") - - // HostIDKey is the attribute Key conforming to the "host.id" semantic - // conventions. It represents the unique host ID. For Cloud, this must be - // the instance_id assigned by the cloud provider. For non-containerized - // systems, this should be the `machine-id`. See the table below for the - // sources to use to determine the `machine-id` based on operating system. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'fdbf79e8af94cb7f9e8df36789187052' - HostIDKey = attribute.Key("host.id") - - // HostImageIDKey is the attribute Key conforming to the "host.image.id" - // semantic conventions. It represents the vM image ID or host OS image ID. - // For Cloud, this value is from the provider. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ami-07b06b442921831e5' - HostImageIDKey = attribute.Key("host.image.id") - - // HostImageNameKey is the attribute Key conforming to the - // "host.image.name" semantic conventions. It represents the name of the VM - // image or OS install the host was instantiated from. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' - HostImageNameKey = attribute.Key("host.image.name") - - // HostImageVersionKey is the attribute Key conforming to the - // "host.image.version" semantic conventions. It represents the version - // string of the VM image or host OS as defined in [Version - // Attributes](/docs/resource/README.md#version-attributes). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0.1' - HostImageVersionKey = attribute.Key("host.image.version") - - // HostIPKey is the attribute Key conforming to the "host.ip" semantic - // conventions. It represents the available IP addresses of the host, - // excluding loopback interfaces. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' - // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 - // addresses MUST be specified in the [RFC - // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format. - HostIPKey = attribute.Key("host.ip") - - // HostMacKey is the attribute Key conforming to the "host.mac" semantic - // conventions. It represents the available MAC addresses of the host, - // excluding loopback interfaces. 
- // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' - // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal - // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf): - // as hyphen-separated octets in uppercase hexadecimal form from most to - // least significant. - HostMacKey = attribute.Key("host.mac") - - // HostNameKey is the attribute Key conforming to the "host.name" semantic - // conventions. It represents the name of the host. On Unix systems, it may - // contain what the hostname command returns, or the fully qualified - // hostname, or another name specified by the user. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-test' - HostNameKey = attribute.Key("host.name") - - // HostTypeKey is the attribute Key conforming to the "host.type" semantic - // conventions. It represents the type of host. For Cloud, this must be the - // machine type. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'n1-standard-1' - HostTypeKey = attribute.Key("host.type") -) - -var ( - // AMD64 - HostArchAMD64 = HostArchKey.String("amd64") - // ARM32 - HostArchARM32 = HostArchKey.String("arm32") - // ARM64 - HostArchARM64 = HostArchKey.String("arm64") - // Itanium - HostArchIA64 = HostArchKey.String("ia64") - // 32-bit PowerPC - HostArchPPC32 = HostArchKey.String("ppc32") - // 64-bit PowerPC - HostArchPPC64 = HostArchKey.String("ppc64") - // IBM z/Architecture - HostArchS390x = HostArchKey.String("s390x") - // 32-bit x86 - HostArchX86 = HostArchKey.String("x86") -) - -// HostCPUCacheL2Size returns an attribute KeyValue conforming to the -// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of -// level 2 memory cache available to the processor (in Bytes). -func HostCPUCacheL2Size(val int) attribute.KeyValue { - return HostCPUCacheL2SizeKey.Int(val) -} - -// HostCPUFamily returns an attribute KeyValue conforming to the -// "host.cpu.family" semantic conventions. It represents the family or -// generation of the CPU. -func HostCPUFamily(val string) attribute.KeyValue { - return HostCPUFamilyKey.String(val) -} - -// HostCPUModelID returns an attribute KeyValue conforming to the -// "host.cpu.model.id" semantic conventions. It represents the model -// identifier. It provides more granular information about the CPU, -// distinguishing it from other CPUs within the same family. -func HostCPUModelID(val string) attribute.KeyValue { - return HostCPUModelIDKey.String(val) -} - -// HostCPUModelName returns an attribute KeyValue conforming to the -// "host.cpu.model.name" semantic conventions. It represents the model -// designation of the processor. -func HostCPUModelName(val string) attribute.KeyValue { - return HostCPUModelNameKey.String(val) -} - -// HostCPUStepping returns an attribute KeyValue conforming to the -// "host.cpu.stepping" semantic conventions. It represents the stepping or core -// revisions. -func HostCPUStepping(val int) attribute.KeyValue { - return HostCPUSteppingKey.Int(val) -} - -// HostCPUVendorID returns an attribute KeyValue conforming to the -// "host.cpu.vendor.id" semantic conventions. It represents the processor -// manufacturer identifier. A maximum 12-character string. 
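The HostArch* enum values line up closely with Go's runtime.GOARCH strings; a hedged mapping sketch, where the fallback to the raw GOARCH string is a choice made here rather than anything the conventions require.

package example

import (
	"runtime"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // version segment is an assumption
)

// hostArch maps runtime.GOARCH onto the HostArch* enum members declared above.
// Unlisted architectures fall back to the raw GOARCH string (sketch choice).
func hostArch() attribute.KeyValue {
	switch runtime.GOARCH {
	case "amd64":
		return semconv.HostArchAMD64
	case "arm64":
		return semconv.HostArchARM64
	case "arm":
		return semconv.HostArchARM32
	case "386":
		return semconv.HostArchX86
	case "ppc64", "ppc64le":
		return semconv.HostArchPPC64
	case "s390x":
		return semconv.HostArchS390x
	default:
		return semconv.HostArchKey.String(runtime.GOARCH)
	}
}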
-func HostCPUVendorID(val string) attribute.KeyValue { - return HostCPUVendorIDKey.String(val) -} - -// HostID returns an attribute KeyValue conforming to the "host.id" semantic -// conventions. It represents the unique host ID. For Cloud, this must be the -// instance_id assigned by the cloud provider. For non-containerized systems, -// this should be the `machine-id`. See the table below for the sources to use -// to determine the `machine-id` based on operating system. -func HostID(val string) attribute.KeyValue { - return HostIDKey.String(val) -} - -// HostImageID returns an attribute KeyValue conforming to the -// "host.image.id" semantic conventions. It represents the vM image ID or host -// OS image ID. For Cloud, this value is from the provider. -func HostImageID(val string) attribute.KeyValue { - return HostImageIDKey.String(val) -} - -// HostImageName returns an attribute KeyValue conforming to the -// "host.image.name" semantic conventions. It represents the name of the VM -// image or OS install the host was instantiated from. -func HostImageName(val string) attribute.KeyValue { - return HostImageNameKey.String(val) -} - -// HostImageVersion returns an attribute KeyValue conforming to the -// "host.image.version" semantic conventions. It represents the version string -// of the VM image or host OS as defined in [Version -// Attributes](/docs/resource/README.md#version-attributes). -func HostImageVersion(val string) attribute.KeyValue { - return HostImageVersionKey.String(val) -} - -// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic -// conventions. It represents the available IP addresses of the host, excluding -// loopback interfaces. -func HostIP(val ...string) attribute.KeyValue { - return HostIPKey.StringSlice(val) -} - -// HostMac returns an attribute KeyValue conforming to the "host.mac" -// semantic conventions. It represents the available MAC addresses of the host, -// excluding loopback interfaces. -func HostMac(val ...string) attribute.KeyValue { - return HostMacKey.StringSlice(val) -} - -// HostName returns an attribute KeyValue conforming to the "host.name" -// semantic conventions. It represents the name of the host. On Unix systems, -// it may contain what the hostname command returns, or the fully qualified -// hostname, or another name specified by the user. -func HostName(val string) attribute.KeyValue { - return HostNameKey.String(val) -} - -// HostType returns an attribute KeyValue conforming to the "host.type" -// semantic conventions. It represents the type of host. For Cloud, this must -// be the machine type. -func HostType(val string) attribute.KeyValue { - return HostTypeKey.String(val) -} - -// Kubernetes resource attributes. -const ( - // K8SClusterNameKey is the attribute Key conforming to the - // "k8s.cluster.name" semantic conventions. It represents the name of the - // cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-cluster' - K8SClusterNameKey = attribute.Key("k8s.cluster.name") - - // K8SClusterUIDKey is the attribute Key conforming to the - // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for - // the cluster, set to the UID of the `kube-system` namespace. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' - // Note: K8S doesn't have support for obtaining a cluster ID. 
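Besides setting host.* attributes by hand, the OpenTelemetry SDK ships resource detector options that populate some of them; a sketch combining both, with the detector behaviour treated as an assumption to verify against the vendored SDK version.

package example

import (
	"context"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // version segment is an assumption
)

func hostResource(ctx context.Context) (*resource.Resource, error) {
	return resource.New(ctx,
		resource.WithHost(),   // detector: host.name from the OS hostname (assumed behaviour)
		resource.WithHostID(), // detector: host.id (machine-id / instance id, assumed behaviour)
		resource.WithAttributes(
			semconv.HostType("n1-standard-1"),            // machine type, per the docs above
			semconv.HostImageID("ami-07b06b442921831e5"), // cloud image, example value
		),
	)
}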
If this is - // ever - // added, we will recommend collecting the `k8s.cluster.uid` through the - // official APIs. In the meantime, we are able to use the `uid` of the - // `kube-system` namespace as a proxy for cluster ID. Read on for the - // rationale. - // - // Every object created in a K8S cluster is assigned a distinct UID. The - // `kube-system` namespace is used by Kubernetes itself and will exist - // for the lifetime of the cluster. Using the `uid` of the `kube-system` - // namespace is a reasonable proxy for the K8S ClusterID as it will only - // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are - // UUIDs as standardized by - // [ISO/IEC 9834-8 and ITU-T - // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html). - // Which states: - // - // > If generated according to one of the mechanisms defined in Rec. - // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be - // different from all other UUIDs generated before 3603 A.D., or is - // extremely likely to be different (depending on the mechanism chosen). - // - // Therefore, UIDs between clusters should be extremely unlikely to - // conflict. - K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") - - // K8SContainerNameKey is the attribute Key conforming to the - // "k8s.container.name" semantic conventions. It represents the name of the - // Container from Pod specification, must be unique within a Pod. Container - // runtime usually uses different globally unique name (`container.name`). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'redis' - K8SContainerNameKey = attribute.Key("k8s.container.name") - - // K8SContainerRestartCountKey is the attribute Key conforming to the - // "k8s.container.restart_count" semantic conventions. It represents the - // number of times the container was restarted. This attribute can be used - // to identify a particular container (running or stopped) within a - // container spec. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 2 - K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") - - // K8SCronJobNameKey is the attribute Key conforming to the - // "k8s.cronjob.name" semantic conventions. It represents the name of the - // CronJob. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") - - // K8SCronJobUIDKey is the attribute Key conforming to the - // "k8s.cronjob.uid" semantic conventions. It represents the UID of the - // CronJob. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") - - // K8SDaemonSetNameKey is the attribute Key conforming to the - // "k8s.daemonset.name" semantic conventions. It represents the name of the - // DaemonSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") - - // K8SDaemonSetUIDKey is the attribute Key conforming to the - // "k8s.daemonset.uid" semantic conventions. It represents the UID of the - // DaemonSet. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") - - // K8SDeploymentNameKey is the attribute Key conforming to the - // "k8s.deployment.name" semantic conventions. It represents the name of - // the Deployment. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") - - // K8SDeploymentUIDKey is the attribute Key conforming to the - // "k8s.deployment.uid" semantic conventions. It represents the UID of the - // Deployment. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") - - // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" - // semantic conventions. It represents the name of the Job. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SJobNameKey = attribute.Key("k8s.job.name") - - // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" - // semantic conventions. It represents the UID of the Job. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SJobUIDKey = attribute.Key("k8s.job.uid") - - // K8SNamespaceNameKey is the attribute Key conforming to the - // "k8s.namespace.name" semantic conventions. It represents the name of the - // namespace that the pod is running in. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'default' - K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") - - // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" - // semantic conventions. It represents the name of the Node. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'node-1' - K8SNodeNameKey = attribute.Key("k8s.node.name") - - // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" - // semantic conventions. It represents the UID of the Node. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' - K8SNodeUIDKey = attribute.Key("k8s.node.uid") - - // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" - // semantic conventions. It represents the name of the Pod. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-pod-autoconf' - K8SPodNameKey = attribute.Key("k8s.pod.name") - - // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" - // semantic conventions. It represents the UID of the Pod. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SPodUIDKey = attribute.Key("k8s.pod.uid") - - // K8SReplicaSetNameKey is the attribute Key conforming to the - // "k8s.replicaset.name" semantic conventions. It represents the name of - // the ReplicaSet. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") - - // K8SReplicaSetUIDKey is the attribute Key conforming to the - // "k8s.replicaset.uid" semantic conventions. It represents the UID of the - // ReplicaSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") - - // K8SStatefulSetNameKey is the attribute Key conforming to the - // "k8s.statefulset.name" semantic conventions. It represents the name of - // the StatefulSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") - - // K8SStatefulSetUIDKey is the attribute Key conforming to the - // "k8s.statefulset.uid" semantic conventions. It represents the UID of the - // StatefulSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") -) - -// K8SClusterName returns an attribute KeyValue conforming to the -// "k8s.cluster.name" semantic conventions. It represents the name of the -// cluster. -func K8SClusterName(val string) attribute.KeyValue { - return K8SClusterNameKey.String(val) -} - -// K8SClusterUID returns an attribute KeyValue conforming to the -// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the -// cluster, set to the UID of the `kube-system` namespace. -func K8SClusterUID(val string) attribute.KeyValue { - return K8SClusterUIDKey.String(val) -} - -// K8SContainerName returns an attribute KeyValue conforming to the -// "k8s.container.name" semantic conventions. It represents the name of the -// Container from Pod specification, must be unique within a Pod. Container -// runtime usually uses different globally unique name (`container.name`). -func K8SContainerName(val string) attribute.KeyValue { - return K8SContainerNameKey.String(val) -} - -// K8SContainerRestartCount returns an attribute KeyValue conforming to the -// "k8s.container.restart_count" semantic conventions. It represents the number -// of times the container was restarted. This attribute can be used to identify -// a particular container (running or stopped) within a container spec. -func K8SContainerRestartCount(val int) attribute.KeyValue { - return K8SContainerRestartCountKey.Int(val) -} - -// K8SCronJobName returns an attribute KeyValue conforming to the -// "k8s.cronjob.name" semantic conventions. It represents the name of the -// CronJob. -func K8SCronJobName(val string) attribute.KeyValue { - return K8SCronJobNameKey.String(val) -} - -// K8SCronJobUID returns an attribute KeyValue conforming to the -// "k8s.cronjob.uid" semantic conventions. It represents the UID of the -// CronJob. -func K8SCronJobUID(val string) attribute.KeyValue { - return K8SCronJobUIDKey.String(val) -} - -// K8SDaemonSetName returns an attribute KeyValue conforming to the -// "k8s.daemonset.name" semantic conventions. It represents the name of the -// DaemonSet. -func K8SDaemonSetName(val string) attribute.KeyValue { - return K8SDaemonSetNameKey.String(val) -} - -// K8SDaemonSetUID returns an attribute KeyValue conforming to the -// "k8s.daemonset.uid" semantic conventions. It represents the UID of the -// DaemonSet. 
-func K8SDaemonSetUID(val string) attribute.KeyValue { - return K8SDaemonSetUIDKey.String(val) -} - -// K8SDeploymentName returns an attribute KeyValue conforming to the -// "k8s.deployment.name" semantic conventions. It represents the name of the -// Deployment. -func K8SDeploymentName(val string) attribute.KeyValue { - return K8SDeploymentNameKey.String(val) -} - -// K8SDeploymentUID returns an attribute KeyValue conforming to the -// "k8s.deployment.uid" semantic conventions. It represents the UID of the -// Deployment. -func K8SDeploymentUID(val string) attribute.KeyValue { - return K8SDeploymentUIDKey.String(val) -} - -// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" -// semantic conventions. It represents the name of the Job. -func K8SJobName(val string) attribute.KeyValue { - return K8SJobNameKey.String(val) -} - -// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" -// semantic conventions. It represents the UID of the Job. -func K8SJobUID(val string) attribute.KeyValue { - return K8SJobUIDKey.String(val) -} - -// K8SNamespaceName returns an attribute KeyValue conforming to the -// "k8s.namespace.name" semantic conventions. It represents the name of the -// namespace that the pod is running in. -func K8SNamespaceName(val string) attribute.KeyValue { - return K8SNamespaceNameKey.String(val) -} - -// K8SNodeName returns an attribute KeyValue conforming to the -// "k8s.node.name" semantic conventions. It represents the name of the Node. -func K8SNodeName(val string) attribute.KeyValue { - return K8SNodeNameKey.String(val) -} - -// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" -// semantic conventions. It represents the UID of the Node. -func K8SNodeUID(val string) attribute.KeyValue { - return K8SNodeUIDKey.String(val) -} - -// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" -// semantic conventions. It represents the name of the Pod. -func K8SPodName(val string) attribute.KeyValue { - return K8SPodNameKey.String(val) -} - -// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" -// semantic conventions. It represents the UID of the Pod. -func K8SPodUID(val string) attribute.KeyValue { - return K8SPodUIDKey.String(val) -} - -// K8SReplicaSetName returns an attribute KeyValue conforming to the -// "k8s.replicaset.name" semantic conventions. It represents the name of the -// ReplicaSet. -func K8SReplicaSetName(val string) attribute.KeyValue { - return K8SReplicaSetNameKey.String(val) -} - -// K8SReplicaSetUID returns an attribute KeyValue conforming to the -// "k8s.replicaset.uid" semantic conventions. It represents the UID of the -// ReplicaSet. -func K8SReplicaSetUID(val string) attribute.KeyValue { - return K8SReplicaSetUIDKey.String(val) -} - -// K8SStatefulSetName returns an attribute KeyValue conforming to the -// "k8s.statefulset.name" semantic conventions. It represents the name of the -// StatefulSet. -func K8SStatefulSetName(val string) attribute.KeyValue { - return K8SStatefulSetNameKey.String(val) -} - -// K8SStatefulSetUID returns an attribute KeyValue conforming to the -// "k8s.statefulset.uid" semantic conventions. It represents the UID of the -// StatefulSet. -func K8SStatefulSetUID(val string) attribute.KeyValue { - return K8SStatefulSetUIDKey.String(val) -} - -// An OCI image manifest. -const ( - // OciManifestDigestKey is the attribute Key conforming to the - // "oci.manifest.digest" semantic conventions. 
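In a Kubernetes workload the k8s.* values are typically injected via the downward API; a sketch assuming POD_NAMESPACE / POD_NAME / NODE_NAME environment variables (those names are hypothetical, not part of this file).

package example

import (
	"os"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // version segment is an assumption
)

// k8sResource assumes the pod spec exposes metadata through the downward API
// under these hypothetical environment variable names.
func k8sResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.K8SNamespaceName(os.Getenv("POD_NAMESPACE")),
		semconv.K8SPodName(os.Getenv("POD_NAME")),
		semconv.K8SNodeName(os.Getenv("NODE_NAME")),
		semconv.K8SDeploymentName("keda-operator"), // example value
	)
}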
It represents the digest of - // the OCI image manifest. For container images specifically is the digest - // by which the container image is known. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4' - // Note: Follows [OCI Image Manifest - // Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md), - // and specifically the [Digest - // property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests). - // An example can be found in [Example Image - // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest). - OciManifestDigestKey = attribute.Key("oci.manifest.digest") -) - -// OciManifestDigest returns an attribute KeyValue conforming to the -// "oci.manifest.digest" semantic conventions. It represents the digest of the -// OCI image manifest. For container images specifically is the digest by which -// the container image is known. -func OciManifestDigest(val string) attribute.KeyValue { - return OciManifestDigestKey.String(val) -} - -// The operating system (OS) on which the process represented by this resource -// is running. -const ( - // OSBuildIDKey is the attribute Key conforming to the "os.build_id" - // semantic conventions. It represents the unique identifier for a - // particular build or compilation of the operating system. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'TQ3C.230805.001.B2', '20E247', '22621' - OSBuildIDKey = attribute.Key("os.build_id") - - // OSDescriptionKey is the attribute Key conforming to the "os.description" - // semantic conventions. It represents the human readable (not intended to - // be parsed) OS version information, like e.g. reported by `ver` or - // `lsb_release -a` commands. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 - // LTS' - OSDescriptionKey = attribute.Key("os.description") - - // OSNameKey is the attribute Key conforming to the "os.name" semantic - // conventions. It represents the human readable operating system name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iOS', 'Android', 'Ubuntu' - OSNameKey = attribute.Key("os.name") - - // OSTypeKey is the attribute Key conforming to the "os.type" semantic - // conventions. It represents the operating system type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - OSTypeKey = attribute.Key("os.type") - - // OSVersionKey is the attribute Key conforming to the "os.version" - // semantic conventions. It represents the version string of the operating - // system as defined in [Version - // Attributes](/docs/resource/README.md#version-attributes). 
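Tying `oci.manifest.digest` back to the `container.image.id` note earlier, a one-line sketch using the documentation's example digest.

package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // version segment is an assumption
)

// ociDigest uses the example digest from the comment above; in practice the
// value comes from the image manifest reported by the runtime or registry.
func ociDigest() attribute.KeyValue {
	return semconv.OciManifestDigest("sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4")
}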
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '14.2.1', '18.04.1' - OSVersionKey = attribute.Key("os.version") -) - -var ( - // Microsoft Windows - OSTypeWindows = OSTypeKey.String("windows") - // Linux - OSTypeLinux = OSTypeKey.String("linux") - // Apple Darwin - OSTypeDarwin = OSTypeKey.String("darwin") - // FreeBSD - OSTypeFreeBSD = OSTypeKey.String("freebsd") - // NetBSD - OSTypeNetBSD = OSTypeKey.String("netbsd") - // OpenBSD - OSTypeOpenBSD = OSTypeKey.String("openbsd") - // DragonFly BSD - OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") - // HP-UX (Hewlett Packard Unix) - OSTypeHPUX = OSTypeKey.String("hpux") - // AIX (Advanced Interactive eXecutive) - OSTypeAIX = OSTypeKey.String("aix") - // SunOS, Oracle Solaris - OSTypeSolaris = OSTypeKey.String("solaris") - // IBM z/OS - OSTypeZOS = OSTypeKey.String("z_os") -) - -// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" -// semantic conventions. It represents the unique identifier for a particular -// build or compilation of the operating system. -func OSBuildID(val string) attribute.KeyValue { - return OSBuildIDKey.String(val) -} - -// OSDescription returns an attribute KeyValue conforming to the -// "os.description" semantic conventions. It represents the human readable (not -// intended to be parsed) OS version information, like e.g. reported by `ver` -// or `lsb_release -a` commands. -func OSDescription(val string) attribute.KeyValue { - return OSDescriptionKey.String(val) -} - -// OSName returns an attribute KeyValue conforming to the "os.name" semantic -// conventions. It represents the human readable operating system name. -func OSName(val string) attribute.KeyValue { - return OSNameKey.String(val) -} - -// OSVersion returns an attribute KeyValue conforming to the "os.version" -// semantic conventions. It represents the version string of the operating -// system as defined in [Version -// Attributes](/docs/resource/README.md#version-attributes). -func OSVersion(val string) attribute.KeyValue { - return OSVersionKey.String(val) -} - -// An operating system process. -const ( - // ProcessCommandKey is the attribute Key conforming to the - // "process.command" semantic conventions. It represents the command used - // to launch the process (i.e. the command name). On Linux based systems, - // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can - // be set to the first parameter extracted from `GetCommandLineW`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'cmd/otelcol' - ProcessCommandKey = attribute.Key("process.command") - - // ProcessCommandArgsKey is the attribute Key conforming to the - // "process.command_args" semantic conventions. It represents the all the - // command arguments (including the command/executable itself) as received - // by the process. On Linux-based systems (and some other Unixoid systems - // supporting procfs), can be set according to the list of null-delimited - // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, - // this would be the full argv vector passed to `main`. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'cmd/otecol', '--config=config.yaml' - ProcessCommandArgsKey = attribute.Key("process.command_args") - - // ProcessCommandLineKey is the attribute Key conforming to the - // "process.command_line" semantic conventions. 
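As with host.arch, the OSType* enum can be derived from runtime.GOOS; a sketch that only handles the platforms commonly built here and falls through to the raw string otherwise (again a sketch choice, not a requirement of the conventions).

package example

import (
	"runtime"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // version segment is an assumption
)

// osType maps runtime.GOOS onto the OSType* enum members declared above.
func osType() attribute.KeyValue {
	switch runtime.GOOS {
	case "linux":
		return semconv.OSTypeLinux
	case "windows":
		return semconv.OSTypeWindows
	case "darwin":
		return semconv.OSTypeDarwin
	default:
		return semconv.OSTypeKey.String(runtime.GOOS)
	}
}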
It represents the full - // command used to launch the process as a single string representing the - // full command. On Windows, can be set to the result of `GetCommandLineW`. - // Do not set this if you have to assemble it just for monitoring; use - // `process.command_args` instead. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' - ProcessCommandLineKey = attribute.Key("process.command_line") - - // ProcessExecutableNameKey is the attribute Key conforming to the - // "process.executable.name" semantic conventions. It represents the name - // of the process executable. On Linux based systems, can be set to the - // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name - // of `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcol' - ProcessExecutableNameKey = attribute.Key("process.executable.name") - - // ProcessExecutablePathKey is the attribute Key conforming to the - // "process.executable.path" semantic conventions. It represents the full - // path to the process executable. On Linux based systems, can be set to - // the target of `proc/[pid]/exe`. On Windows, can be set to the result of - // `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/usr/bin/cmd/otelcol' - ProcessExecutablePathKey = attribute.Key("process.executable.path") - - // ProcessOwnerKey is the attribute Key conforming to the "process.owner" - // semantic conventions. It represents the username of the user that owns - // the process. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'root' - ProcessOwnerKey = attribute.Key("process.owner") - - // ProcessParentPIDKey is the attribute Key conforming to the - // "process.parent_pid" semantic conventions. It represents the parent - // Process identifier (PPID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 111 - ProcessParentPIDKey = attribute.Key("process.parent_pid") - - // ProcessPIDKey is the attribute Key conforming to the "process.pid" - // semantic conventions. It represents the process identifier (PID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1234 - ProcessPIDKey = attribute.Key("process.pid") - - // ProcessRuntimeDescriptionKey is the attribute Key conforming to the - // "process.runtime.description" semantic conventions. It represents an - // additional description about the runtime of the process, for example a - // specific vendor customization of the runtime environment. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' - ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") - - // ProcessRuntimeNameKey is the attribute Key conforming to the - // "process.runtime.name" semantic conventions. It represents the name of - // the runtime of this process. For compiled native binaries, this SHOULD - // be the name of the compiler. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'OpenJDK Runtime Environment' - ProcessRuntimeNameKey = attribute.Key("process.runtime.name") - - // ProcessRuntimeVersionKey is the attribute Key conforming to the - // "process.runtime.version" semantic conventions. It represents the - // version of the runtime of this process, as returned by the runtime - // without modification. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '14.0.2' - ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") -) - -// ProcessCommand returns an attribute KeyValue conforming to the -// "process.command" semantic conventions. It represents the command used to -// launch the process (i.e. the command name). On Linux based systems, can be -// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to -// the first parameter extracted from `GetCommandLineW`. -func ProcessCommand(val string) attribute.KeyValue { - return ProcessCommandKey.String(val) -} - -// ProcessCommandArgs returns an attribute KeyValue conforming to the -// "process.command_args" semantic conventions. It represents the all the -// command arguments (including the command/executable itself) as received by -// the process. On Linux-based systems (and some other Unixoid systems -// supporting procfs), can be set according to the list of null-delimited -// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, -// this would be the full argv vector passed to `main`. -func ProcessCommandArgs(val ...string) attribute.KeyValue { - return ProcessCommandArgsKey.StringSlice(val) -} - -// ProcessCommandLine returns an attribute KeyValue conforming to the -// "process.command_line" semantic conventions. It represents the full command -// used to launch the process as a single string representing the full command. -// On Windows, can be set to the result of `GetCommandLineW`. Do not set this -// if you have to assemble it just for monitoring; use `process.command_args` -// instead. -func ProcessCommandLine(val string) attribute.KeyValue { - return ProcessCommandLineKey.String(val) -} - -// ProcessExecutableName returns an attribute KeyValue conforming to the -// "process.executable.name" semantic conventions. It represents the name of -// the process executable. On Linux based systems, can be set to the `Name` in -// `proc/[pid]/status`. On Windows, can be set to the base name of -// `GetProcessImageFileNameW`. -func ProcessExecutableName(val string) attribute.KeyValue { - return ProcessExecutableNameKey.String(val) -} - -// ProcessExecutablePath returns an attribute KeyValue conforming to the -// "process.executable.path" semantic conventions. It represents the full path -// to the process executable. On Linux based systems, can be set to the target -// of `proc/[pid]/exe`. On Windows, can be set to the result of -// `GetProcessImageFileNameW`. -func ProcessExecutablePath(val string) attribute.KeyValue { - return ProcessExecutablePathKey.String(val) -} - -// ProcessOwner returns an attribute KeyValue conforming to the -// "process.owner" semantic conventions. It represents the username of the user -// that owns the process. -func ProcessOwner(val string) attribute.KeyValue { - return ProcessOwnerKey.String(val) -} - -// ProcessParentPID returns an attribute KeyValue conforming to the -// "process.parent_pid" semantic conventions. It represents the parent Process -// identifier (PPID). 
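The process.* values map directly onto what the standard library already exposes; a sketch that prefers the SDK's WithProcess detector option (its exact coverage is an assumption to check against the vendored SDK) and falls back to two explicit attributes.

package example

import (
	"context"
	"os"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // version segment is an assumption
)

func processResource(ctx context.Context) (*resource.Resource, error) {
	// Option 1: let the SDK's built-in detectors populate process.* attributes.
	if detected, err := resource.New(ctx, resource.WithProcess()); err == nil {
		return detected, nil
	}
	// Option 2 (fallback, purely for illustration): set a couple of the
	// attributes explicitly from the standard library.
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ProcessPID(os.Getpid()),
		semconv.ProcessCommandArgs(os.Args...),
	), nil
}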
-func ProcessParentPID(val int) attribute.KeyValue { - return ProcessParentPIDKey.Int(val) -} - -// ProcessPID returns an attribute KeyValue conforming to the "process.pid" -// semantic conventions. It represents the process identifier (PID). -func ProcessPID(val int) attribute.KeyValue { - return ProcessPIDKey.Int(val) -} - -// ProcessRuntimeDescription returns an attribute KeyValue conforming to the -// "process.runtime.description" semantic conventions. It represents an -// additional description about the runtime of the process, for example a -// specific vendor customization of the runtime environment. -func ProcessRuntimeDescription(val string) attribute.KeyValue { - return ProcessRuntimeDescriptionKey.String(val) -} - -// ProcessRuntimeName returns an attribute KeyValue conforming to the -// "process.runtime.name" semantic conventions. It represents the name of the -// runtime of this process. For compiled native binaries, this SHOULD be the -// name of the compiler. -func ProcessRuntimeName(val string) attribute.KeyValue { - return ProcessRuntimeNameKey.String(val) -} - -// ProcessRuntimeVersion returns an attribute KeyValue conforming to the -// "process.runtime.version" semantic conventions. It represents the version of -// the runtime of this process, as returned by the runtime without -// modification. -func ProcessRuntimeVersion(val string) attribute.KeyValue { - return ProcessRuntimeVersionKey.String(val) -} - -// The Android platform on which the Android application is running. -const ( - // AndroidOSAPILevelKey is the attribute Key conforming to the - // "android.os.api_level" semantic conventions. It represents the uniquely - // identifies the framework API revision offered by a version - // (`os.version`) of the android operating system. More information can be - // found - // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '33', '32' - AndroidOSAPILevelKey = attribute.Key("android.os.api_level") -) - -// AndroidOSAPILevel returns an attribute KeyValue conforming to the -// "android.os.api_level" semantic conventions. It represents the uniquely -// identifies the framework API revision offered by a version (`os.version`) of -// the android operating system. More information can be found -// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). -func AndroidOSAPILevel(val string) attribute.KeyValue { - return AndroidOSAPILevelKey.String(val) -} - -// The web browser in which the application represented by the resource is -// running. The `browser.*` attributes MUST be used only for resources that -// represent applications running in a web browser (regardless of whether -// running on a mobile or desktop device). -const ( - // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" - // semantic conventions. It represents the array of brand name and version - // separated by a space - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.brands`). - BrowserBrandsKey = attribute.Key("browser.brands") - - // BrowserLanguageKey is the attribute Key conforming to the - // "browser.language" semantic conventions. 
It represents the preferred - // language of the user using the browser - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'en', 'en-US', 'fr', 'fr-FR' - // Note: This value is intended to be taken from the Navigator API - // `navigator.language`. - BrowserLanguageKey = attribute.Key("browser.language") - - // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" - // semantic conventions. It represents a boolean that is true if the - // browser is running on a mobile device - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.mobile`). If unavailable, this attribute - // SHOULD be left unset. - BrowserMobileKey = attribute.Key("browser.mobile") - - // BrowserPlatformKey is the attribute Key conforming to the - // "browser.platform" semantic conventions. It represents the platform on - // which the browser is running - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Windows', 'macOS', 'Android' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.platform`). If unavailable, the legacy - // `navigator.platform` API SHOULD NOT be used instead and this attribute - // SHOULD be left unset in order for the values to be consistent. - // The list of possible values is defined in the [W3C User-Agent Client - // Hints - // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). - // Note that some (but not all) of these values can overlap with values in - // the [`os.type` and `os.name` attributes](./os.md). However, for - // consistency, the values in the `browser.platform` attribute should - // capture the exact value that the user agent provides. - BrowserPlatformKey = attribute.Key("browser.platform") -) - -// BrowserBrands returns an attribute KeyValue conforming to the -// "browser.brands" semantic conventions. It represents the array of brand name -// and version separated by a space -func BrowserBrands(val ...string) attribute.KeyValue { - return BrowserBrandsKey.StringSlice(val) -} - -// BrowserLanguage returns an attribute KeyValue conforming to the -// "browser.language" semantic conventions. It represents the preferred -// language of the user using the browser -func BrowserLanguage(val string) attribute.KeyValue { - return BrowserLanguageKey.String(val) -} - -// BrowserMobile returns an attribute KeyValue conforming to the -// "browser.mobile" semantic conventions. It represents a boolean that is true -// if the browser is running on a mobile device -func BrowserMobile(val bool) attribute.KeyValue { - return BrowserMobileKey.Bool(val) -} - -// BrowserPlatform returns an attribute KeyValue conforming to the -// "browser.platform" semantic conventions. It represents the platform on which -// the browser is running -func BrowserPlatform(val string) attribute.KeyValue { - return BrowserPlatformKey.String(val) -} - -// Resources used by AWS Elastic Container Service (ECS). -const ( - // AWSECSClusterARNKey is the attribute Key conforming to the - // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an - // [ECS - // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") - - // AWSECSContainerARNKey is the attribute Key conforming to the - // "aws.ecs.container.arn" semantic conventions. It represents the Amazon - // Resource Name (ARN) of an [ECS container - // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' - AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") - - // AWSECSLaunchtypeKey is the attribute Key conforming to the - // "aws.ecs.launchtype" semantic conventions. It represents the [launch - // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) - // for an ECS task. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") - - // AWSECSTaskARNKey is the attribute Key conforming to the - // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an - // [ECS task - // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") - - // AWSECSTaskFamilyKey is the attribute Key conforming to the - // "aws.ecs.task.family" semantic conventions. It represents the task - // definition family this task definition is a member of. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-family' - AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") - - // AWSECSTaskRevisionKey is the attribute Key conforming to the - // "aws.ecs.task.revision" semantic conventions. It represents the revision - // for this task definition. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '8', '26' - AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") -) - -var ( - // ec2 - AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") - // fargate - AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") -) - -// AWSECSClusterARN returns an attribute KeyValue conforming to the -// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS -// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). -func AWSECSClusterARN(val string) attribute.KeyValue { - return AWSECSClusterARNKey.String(val) -} - -// AWSECSContainerARN returns an attribute KeyValue conforming to the -// "aws.ecs.container.arn" semantic conventions. It represents the Amazon -// Resource Name (ARN) of an [ECS container -// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). -func AWSECSContainerARN(val string) attribute.KeyValue { - return AWSECSContainerARNKey.String(val) -} - -// AWSECSTaskARN returns an attribute KeyValue conforming to the -// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS -// task -// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). 
-func AWSECSTaskARN(val string) attribute.KeyValue { - return AWSECSTaskARNKey.String(val) -} - -// AWSECSTaskFamily returns an attribute KeyValue conforming to the -// "aws.ecs.task.family" semantic conventions. It represents the task -// definition family this task definition is a member of. -func AWSECSTaskFamily(val string) attribute.KeyValue { - return AWSECSTaskFamilyKey.String(val) -} - -// AWSECSTaskRevision returns an attribute KeyValue conforming to the -// "aws.ecs.task.revision" semantic conventions. It represents the revision for -// this task definition. -func AWSECSTaskRevision(val string) attribute.KeyValue { - return AWSECSTaskRevisionKey.String(val) -} - -// Resources used by AWS Elastic Kubernetes Service (EKS). -const ( - // AWSEKSClusterARNKey is the attribute Key conforming to the - // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an - // EKS cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") -) - -// AWSEKSClusterARN returns an attribute KeyValue conforming to the -// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS -// cluster. -func AWSEKSClusterARN(val string) attribute.KeyValue { - return AWSEKSClusterARNKey.String(val) -} - -// Resources specific to Amazon Web Services. -const ( - // AWSLogGroupARNsKey is the attribute Key conforming to the - // "aws.log.group.arns" semantic conventions. It represents the Amazon - // Resource Name(s) (ARN) of the AWS log group(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' - // Note: See the [log group ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") - - // AWSLogGroupNamesKey is the attribute Key conforming to the - // "aws.log.group.names" semantic conventions. It represents the name(s) of - // the AWS log group(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/aws/lambda/my-function', 'opentelemetry-service' - // Note: Multiple log groups must be supported for cases like - // multi-container applications, where a single application has sidecar - // containers, and each write to their own log group. - AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") - - // AWSLogStreamARNsKey is the attribute Key conforming to the - // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of - // the AWS log stream(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - // Note: See the [log stream ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - // One log group can contain several log streams, so these ARNs necessarily - // identify both a log group and a log stream. - AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") - - // AWSLogStreamNamesKey is the attribute Key conforming to the - // "aws.log.stream.names" semantic conventions. 
It represents the name(s) - // of the AWS log stream(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") -) - -// AWSLogGroupARNs returns an attribute KeyValue conforming to the -// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource -// Name(s) (ARN) of the AWS log group(s). -func AWSLogGroupARNs(val ...string) attribute.KeyValue { - return AWSLogGroupARNsKey.StringSlice(val) -} - -// AWSLogGroupNames returns an attribute KeyValue conforming to the -// "aws.log.group.names" semantic conventions. It represents the name(s) of the -// AWS log group(s) an application is writing to. -func AWSLogGroupNames(val ...string) attribute.KeyValue { - return AWSLogGroupNamesKey.StringSlice(val) -} - -// AWSLogStreamARNs returns an attribute KeyValue conforming to the -// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the -// AWS log stream(s). -func AWSLogStreamARNs(val ...string) attribute.KeyValue { - return AWSLogStreamARNsKey.StringSlice(val) -} - -// AWSLogStreamNames returns an attribute KeyValue conforming to the -// "aws.log.stream.names" semantic conventions. It represents the name(s) of -// the AWS log stream(s) an application is writing to. -func AWSLogStreamNames(val ...string) attribute.KeyValue { - return AWSLogStreamNamesKey.StringSlice(val) -} - -// Resource used by Google Cloud Run. -const ( - // GCPCloudRunJobExecutionKey is the attribute Key conforming to the - // "gcp.cloud_run.job.execution" semantic conventions. It represents the - // name of the Cloud Run - // [execution](https://cloud.google.com/run/docs/managing/job-executions) - // being run for the Job, as set by the - // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) - // environment variable. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'job-name-xxxx', 'sample-job-mdw84' - GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") - - // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the - // "gcp.cloud_run.job.task_index" semantic conventions. It represents the - // index for a task within an execution as provided by the - // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) - // environment variable. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 1 - GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") -) - -// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.execution" semantic conventions. It represents the name -// of the Cloud Run -// [execution](https://cloud.google.com/run/docs/managing/job-executions) being -// run for the Job, as set by the -// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) -// environment variable. -func GCPCloudRunJobExecution(val string) attribute.KeyValue { - return GCPCloudRunJobExecutionKey.String(val) -} - -// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.task_index" semantic conventions. 
It represents the index -// for a task within an execution as provided by the -// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) -// environment variable. -func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { - return GCPCloudRunJobTaskIndexKey.Int(val) -} - -// Resources used by Google Compute Engine (GCE). -const ( - // GCPGceInstanceHostnameKey is the attribute Key conforming to the - // "gcp.gce.instance.hostname" semantic conventions. It represents the - // hostname of a GCE instance. This is the full value of the default or - // [custom - // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-host1234.example.com', - // 'sample-vm.us-west1-b.c.my-project.internal' - GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") - - // GCPGceInstanceNameKey is the attribute Key conforming to the - // "gcp.gce.instance.name" semantic conventions. It represents the instance - // name of a GCE instance. This is the value provided by `host.name`, the - // visible name of the instance in the Cloud Console UI, and the prefix for - // the default hostname of the instance as defined by the [default internal - // DNS - // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'instance-1', 'my-vm-name' - GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") -) - -// GCPGceInstanceHostname returns an attribute KeyValue conforming to the -// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname -// of a GCE instance. This is the full value of the default or [custom -// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). -func GCPGceInstanceHostname(val string) attribute.KeyValue { - return GCPGceInstanceHostnameKey.String(val) -} - -// GCPGceInstanceName returns an attribute KeyValue conforming to the -// "gcp.gce.instance.name" semantic conventions. It represents the instance -// name of a GCE instance. This is the value provided by `host.name`, the -// visible name of the instance in the Cloud Console UI, and the prefix for the -// default hostname of the instance as defined by the [default internal DNS -// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). -func GCPGceInstanceName(val string) attribute.KeyValue { - return GCPGceInstanceNameKey.String(val) -} - -// Heroku dyno metadata -const ( - // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" - // semantic conventions. It represents the unique identifier for the - // application - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' - HerokuAppIDKey = attribute.Key("heroku.app.id") - - // HerokuReleaseCommitKey is the attribute Key conforming to the - // "heroku.release.commit" semantic conventions. It represents the commit - // hash for the current release - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' - HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") - - // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the - // "heroku.release.creation_timestamp" semantic conventions. 
It represents - // the time and date the release was created - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2022-10-23T18:00:42Z' - HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") -) - -// HerokuAppID returns an attribute KeyValue conforming to the -// "heroku.app.id" semantic conventions. It represents the unique identifier -// for the application -func HerokuAppID(val string) attribute.KeyValue { - return HerokuAppIDKey.String(val) -} - -// HerokuReleaseCommit returns an attribute KeyValue conforming to the -// "heroku.release.commit" semantic conventions. It represents the commit hash -// for the current release -func HerokuReleaseCommit(val string) attribute.KeyValue { - return HerokuReleaseCommitKey.String(val) -} - -// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming -// to the "heroku.release.creation_timestamp" semantic conventions. It -// represents the time and date the release was created -func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { - return HerokuReleaseCreationTimestampKey.String(val) -} - -// The software deployment. -const ( - // DeploymentEnvironmentKey is the attribute Key conforming to the - // "deployment.environment" semantic conventions. It represents the name of - // the [deployment - // environment](https://wikipedia.org/wiki/Deployment_environment) (aka - // deployment tier). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'staging', 'production' - // Note: `deployment.environment` does not affect the uniqueness - // constraints defined through - // the `service.namespace`, `service.name` and `service.instance.id` - // resource attributes. - // This implies that resources carrying the following attribute - // combinations MUST be - // considered to be identifying the same service: - // - // * `service.name=frontend`, `deployment.environment=production` - // * `service.name=frontend`, `deployment.environment=staging`. - DeploymentEnvironmentKey = attribute.Key("deployment.environment") -) - -// DeploymentEnvironment returns an attribute KeyValue conforming to the -// "deployment.environment" semantic conventions. It represents the name of the -// [deployment environment](https://wikipedia.org/wiki/Deployment_environment) -// (aka deployment tier). -func DeploymentEnvironment(val string) attribute.KeyValue { - return DeploymentEnvironmentKey.String(val) -} - -// A serverless instance. -const ( - // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" - // semantic conventions. It represents the execution environment ID as a - // string, that will be potentially reused for other invocations to the - // same function/function version. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' - // Note: * **AWS Lambda:** Use the (full) log stream name. - FaaSInstanceKey = attribute.Key("faas.instance") - - // FaaSMaxMemoryKey is the attribute Key conforming to the - // "faas.max_memory" semantic conventions. It represents the amount of - // memory available to the serverless function converted to Bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 134217728 - // Note: It's recommended to set this attribute since e.g. too little - // memory can easily stop a Java AWS Lambda function from working - // correctly. 
On AWS Lambda, the environment variable - // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must - // be multiplied by 1,048,576). - FaaSMaxMemoryKey = attribute.Key("faas.max_memory") - - // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic - // conventions. It represents the name of the single function that this - // runtime instance executes. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'my-function', 'myazurefunctionapp/some-function-name' - // Note: This is the name of the function as configured/deployed on the - // FaaS - // platform and is usually different from the name of the callback - // function (which may be stored in the - // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes) - // span attributes). - // - // For some cloud providers, the above definition is ambiguous. The - // following - // definition of function name MUST be used for this attribute - // (and consequently the span name) for the listed cloud - // providers/products: - // - // * **Azure:** The full name `/`, i.e., function app name - // followed by a forward slash followed by the function name (this form - // can also be seen in the resource JSON for the function). - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider (see also the `cloud.resource_id` attribute). - FaaSNameKey = attribute.Key("faas.name") - - // FaaSVersionKey is the attribute Key conforming to the "faas.version" - // semantic conventions. It represents the immutable version of the - // function being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '26', 'pinkfroid-00002' - // Note: Depending on the cloud provider and platform, use: - // - // * **AWS Lambda:** The [function - // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) - // (an integer represented as a decimal string). - // * **Google Cloud Run (Services):** The - // [revision](https://cloud.google.com/run/docs/managing/revisions) - // (i.e., the function name plus the revision suffix). - // * **Google Cloud Functions:** The value of the - // [`K_REVISION` environment - // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). - // * **Azure Functions:** Not applicable. Do not set this attribute. - FaaSVersionKey = attribute.Key("faas.version") -) - -// FaaSInstance returns an attribute KeyValue conforming to the -// "faas.instance" semantic conventions. It represents the execution -// environment ID as a string, that will be potentially reused for other -// invocations to the same function/function version. -func FaaSInstance(val string) attribute.KeyValue { - return FaaSInstanceKey.String(val) -} - -// FaaSMaxMemory returns an attribute KeyValue conforming to the -// "faas.max_memory" semantic conventions. It represents the amount of memory -// available to the serverless function converted to Bytes. -func FaaSMaxMemory(val int) attribute.KeyValue { - return FaaSMaxMemoryKey.Int(val) -} - -// FaaSName returns an attribute KeyValue conforming to the "faas.name" -// semantic conventions. It represents the name of the single function that -// this runtime instance executes. 
-func FaaSName(val string) attribute.KeyValue { - return FaaSNameKey.String(val) -} - -// FaaSVersion returns an attribute KeyValue conforming to the -// "faas.version" semantic conventions. It represents the immutable version of -// the function being executed. -func FaaSVersion(val string) attribute.KeyValue { - return FaaSVersionKey.String(val) -} - -// A service instance. -const ( - // ServiceNameKey is the attribute Key conforming to the "service.name" - // semantic conventions. It represents the logical name of the service. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'shoppingcart' - // Note: MUST be the same for all instances of horizontally scaled - // services. If the value was not specified, SDKs MUST fallback to - // `unknown_service:` concatenated with - // [`process.executable.name`](process.md#process), e.g. - // `unknown_service:bash`. If `process.executable.name` is not available, - // the value MUST be set to `unknown_service`. - ServiceNameKey = attribute.Key("service.name") - - // ServiceVersionKey is the attribute Key conforming to the - // "service.version" semantic conventions. It represents the version string - // of the service API or implementation. The format is not defined by these - // conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2.0.0', 'a01dbef8a' - ServiceVersionKey = attribute.Key("service.version") -) - -// ServiceName returns an attribute KeyValue conforming to the -// "service.name" semantic conventions. It represents the logical name of the -// service. -func ServiceName(val string) attribute.KeyValue { - return ServiceNameKey.String(val) -} - -// ServiceVersion returns an attribute KeyValue conforming to the -// "service.version" semantic conventions. It represents the version string of -// the service API or implementation. The format is not defined by these -// conventions. -func ServiceVersion(val string) attribute.KeyValue { - return ServiceVersionKey.String(val) -} - -// A service instance. -const ( - // ServiceInstanceIDKey is the attribute Key conforming to the - // "service.instance.id" semantic conventions. It represents the string ID - // of the service instance. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-k8s-pod-deployment-1', - // '627cc493-f310-47de-96bd-71410b7dec09' - // Note: MUST be unique for each instance of the same - // `service.namespace,service.name` pair (in other words - // `service.namespace,service.name,service.instance.id` triplet MUST be - // globally unique). The ID helps to distinguish instances of the same - // service that exist at the same time (e.g. instances of a horizontally - // scaled service). It is preferable for the ID to be persistent and stay - // the same for the lifetime of the service instance, however it is - // acceptable that the ID is ephemeral and changes during important - // lifetime events for the service (e.g. service restarts). If the service - // has no inherent unique ID that can be used as the value of this - // attribute it is recommended to generate a random Version 1 or Version 4 - // RFC 4122 UUID (services aiming for reproducible UUIDs may also use - // Version 5, see RFC 4122 for more recommendations). - ServiceInstanceIDKey = attribute.Key("service.instance.id") - - // ServiceNamespaceKey is the attribute Key conforming to the - // "service.namespace" semantic conventions. 
It represents a namespace for - // `service.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Shop' - // Note: A string value having a meaning that helps to distinguish a group - // of services, for example the team name that owns a group of services. - // `service.name` is expected to be unique within the same namespace. If - // `service.namespace` is not specified in the Resource then `service.name` - // is expected to be unique for all services that have no explicit - // namespace defined (so the empty/unspecified namespace is simply one more - // valid namespace). Zero-length namespace string is assumed equal to - // unspecified namespace. - ServiceNamespaceKey = attribute.Key("service.namespace") -) - -// ServiceInstanceID returns an attribute KeyValue conforming to the -// "service.instance.id" semantic conventions. It represents the string ID of -// the service instance. -func ServiceInstanceID(val string) attribute.KeyValue { - return ServiceInstanceIDKey.String(val) -} - -// ServiceNamespace returns an attribute KeyValue conforming to the -// "service.namespace" semantic conventions. It represents a namespace for -// `service.name`. -func ServiceNamespace(val string) attribute.KeyValue { - return ServiceNamespaceKey.String(val) -} - -// The telemetry SDK used to capture data recorded by the instrumentation -// libraries. -const ( - // TelemetrySDKLanguageKey is the attribute Key conforming to the - // "telemetry.sdk.language" semantic conventions. It represents the - // language of the telemetry SDK. - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") - - // TelemetrySDKNameKey is the attribute Key conforming to the - // "telemetry.sdk.name" semantic conventions. It represents the name of the - // telemetry SDK as defined above. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'opentelemetry' - // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute - // to `opentelemetry`. - // If another SDK, like a fork or a vendor-provided implementation, is - // used, this SDK MUST set the - // `telemetry.sdk.name` attribute to the fully-qualified class or module - // name of this SDK's main entry point - // or another suitable identifier depending on the language. - // The identifier `opentelemetry` is reserved and MUST NOT be used in this - // case. - // All custom identifiers SHOULD be stable across different versions of an - // implementation. - TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") - - // TelemetrySDKVersionKey is the attribute Key conforming to the - // "telemetry.sdk.version" semantic conventions. It represents the version - // string of the telemetry SDK. 
- // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: '1.2.3' - TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") -) - -var ( - // cpp - TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") - // dotnet - TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") - // erlang - TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") - // go - TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") - // java - TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") - // nodejs - TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") - // php - TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") - // python - TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") - // ruby - TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") - // rust - TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") - // swift - TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") - // webjs - TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") -) - -// TelemetrySDKName returns an attribute KeyValue conforming to the -// "telemetry.sdk.name" semantic conventions. It represents the name of the -// telemetry SDK as defined above. -func TelemetrySDKName(val string) attribute.KeyValue { - return TelemetrySDKNameKey.String(val) -} - -// TelemetrySDKVersion returns an attribute KeyValue conforming to the -// "telemetry.sdk.version" semantic conventions. It represents the version -// string of the telemetry SDK. -func TelemetrySDKVersion(val string) attribute.KeyValue { - return TelemetrySDKVersionKey.String(val) -} - -// The telemetry SDK used to capture data recorded by the instrumentation -// libraries. -const ( - // TelemetryDistroNameKey is the attribute Key conforming to the - // "telemetry.distro.name" semantic conventions. It represents the name of - // the auto instrumentation agent or distribution, if used. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'parts-unlimited-java' - // Note: Official auto instrumentation agents and distributions SHOULD set - // the `telemetry.distro.name` attribute to - // a string starting with `opentelemetry-`, e.g. - // `opentelemetry-java-instrumentation`. - TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") - - // TelemetryDistroVersionKey is the attribute Key conforming to the - // "telemetry.distro.version" semantic conventions. It represents the - // version string of the auto instrumentation agent or distribution, if - // used. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.2.3' - TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") -) - -// TelemetryDistroName returns an attribute KeyValue conforming to the -// "telemetry.distro.name" semantic conventions. It represents the name of the -// auto instrumentation agent or distribution, if used. -func TelemetryDistroName(val string) attribute.KeyValue { - return TelemetryDistroNameKey.String(val) -} - -// TelemetryDistroVersion returns an attribute KeyValue conforming to the -// "telemetry.distro.version" semantic conventions. It represents the version -// string of the auto instrumentation agent or distribution, if used. 
-func TelemetryDistroVersion(val string) attribute.KeyValue { - return TelemetryDistroVersionKey.String(val) -} - -// Resource describing the packaged software running the application code. Web -// engines are typically executed using process.runtime. -const ( - // WebEngineDescriptionKey is the attribute Key conforming to the - // "webengine.description" semantic conventions. It represents the - // additional description of the web engine (e.g. detailed version and - // edition information). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - - // 2.2.2.Final' - WebEngineDescriptionKey = attribute.Key("webengine.description") - - // WebEngineNameKey is the attribute Key conforming to the "webengine.name" - // semantic conventions. It represents the name of the web engine. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'WildFly' - WebEngineNameKey = attribute.Key("webengine.name") - - // WebEngineVersionKey is the attribute Key conforming to the - // "webengine.version" semantic conventions. It represents the version of - // the web engine. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '21.0.0' - WebEngineVersionKey = attribute.Key("webengine.version") -) - -// WebEngineDescription returns an attribute KeyValue conforming to the -// "webengine.description" semantic conventions. It represents the additional -// description of the web engine (e.g. detailed version and edition -// information). -func WebEngineDescription(val string) attribute.KeyValue { - return WebEngineDescriptionKey.String(val) -} - -// WebEngineName returns an attribute KeyValue conforming to the -// "webengine.name" semantic conventions. It represents the name of the web -// engine. -func WebEngineName(val string) attribute.KeyValue { - return WebEngineNameKey.String(val) -} - -// WebEngineVersion returns an attribute KeyValue conforming to the -// "webengine.version" semantic conventions. It represents the version of the -// web engine. -func WebEngineVersion(val string) attribute.KeyValue { - return WebEngineVersionKey.String(val) -} - -// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's -// concepts. -const ( - // OTelScopeNameKey is the attribute Key conforming to the - // "otel.scope.name" semantic conventions. It represents the name of the - // instrumentation scope - (`InstrumentationScope.Name` in OTLP). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'io.opentelemetry.contrib.mongodb' - OTelScopeNameKey = attribute.Key("otel.scope.name") - - // OTelScopeVersionKey is the attribute Key conforming to the - // "otel.scope.version" semantic conventions. It represents the version of - // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.0.0' - OTelScopeVersionKey = attribute.Key("otel.scope.version") -) - -// OTelScopeName returns an attribute KeyValue conforming to the -// "otel.scope.name" semantic conventions. It represents the name of the -// instrumentation scope - (`InstrumentationScope.Name` in OTLP). -func OTelScopeName(val string) attribute.KeyValue { - return OTelScopeNameKey.String(val) -} - -// OTelScopeVersion returns an attribute KeyValue conforming to the -// "otel.scope.version" semantic conventions. 
It represents the version of the -// instrumentation scope - (`InstrumentationScope.Version` in OTLP). -func OTelScopeVersion(val string) attribute.KeyValue { - return OTelScopeVersionKey.String(val) -} - -// Span attributes used by non-OTLP exporters to represent OpenTelemetry -// Scope's concepts. -const ( - // OTelLibraryNameKey is the attribute Key conforming to the - // "otel.library.name" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'io.opentelemetry.contrib.mongodb' - // Deprecated: use the `otel.scope.name` attribute. - OTelLibraryNameKey = attribute.Key("otel.library.name") - - // OTelLibraryVersionKey is the attribute Key conforming to the - // "otel.library.version" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '1.0.0' - // Deprecated: use the `otel.scope.version` attribute. - OTelLibraryVersionKey = attribute.Key("otel.library.version") -) - -// OTelLibraryName returns an attribute KeyValue conforming to the -// "otel.library.name" semantic conventions. -// -// Deprecated: use the `otel.scope.name` attribute. -func OTelLibraryName(val string) attribute.KeyValue { - return OTelLibraryNameKey.String(val) -} - -// OTelLibraryVersion returns an attribute KeyValue conforming to the -// "otel.library.version" semantic conventions. -// -// Deprecated: use the `otel.scope.version` attribute. -func OTelLibraryVersion(val string) attribute.KeyValue { - return OTelLibraryVersionKey.String(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go deleted file mode 100644 index c1718234e52..00000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go +++ /dev/null @@ -1,1323 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" - -import "go.opentelemetry.io/otel/attribute" - -// Operations that access some remote service. -const ( - // PeerServiceKey is the attribute Key conforming to the "peer.service" - // semantic conventions. It represents the - // [`service.name`](/docs/resource/README.md#service) of the remote - // service. SHOULD be equal to the actual `service.name` resource attribute - // of the remote service if any. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'AuthTokenCache' - PeerServiceKey = attribute.Key("peer.service") -) - -// PeerService returns an attribute KeyValue conforming to the -// "peer.service" semantic conventions. It represents the -// [`service.name`](/docs/resource/README.md#service) of the remote service. -// SHOULD be equal to the actual `service.name` resource attribute of the -// remote service if any. -func PeerService(val string) attribute.KeyValue { - return PeerServiceKey.String(val) -} - -// These attributes may be used for any operation with an authenticated and/or -// authorized enduser. -const ( - // EnduserIDKey is the attribute Key conforming to the "enduser.id" - // semantic conventions. It represents the username or client_id extracted - // from the access token or - // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header - // in the inbound request from outside the system. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'username' - EnduserIDKey = attribute.Key("enduser.id") - - // EnduserRoleKey is the attribute Key conforming to the "enduser.role" - // semantic conventions. It represents the actual/assumed role the client - // is making the request under extracted from token or application security - // context. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'admin' - EnduserRoleKey = attribute.Key("enduser.role") - - // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" - // semantic conventions. It represents the scopes or granted authorities - // the client currently possesses extracted from token or application - // security context. The value would come from the scope associated with an - // [OAuth 2.0 Access - // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute - // value in a [SAML 2.0 - // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'read:message, write:files' - EnduserScopeKey = attribute.Key("enduser.scope") -) - -// EnduserID returns an attribute KeyValue conforming to the "enduser.id" -// semantic conventions. It represents the username or client_id extracted from -// the access token or -// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in -// the inbound request from outside the system. -func EnduserID(val string) attribute.KeyValue { - return EnduserIDKey.String(val) -} - -// EnduserRole returns an attribute KeyValue conforming to the -// "enduser.role" semantic conventions. It represents the actual/assumed role -// the client is making the request under extracted from token or application -// security context. -func EnduserRole(val string) attribute.KeyValue { - return EnduserRoleKey.String(val) -} - -// EnduserScope returns an attribute KeyValue conforming to the -// "enduser.scope" semantic conventions. It represents the scopes or granted -// authorities the client currently possesses extracted from token or -// application security context. The value would come from the scope associated -// with an [OAuth 2.0 Access -// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute -// value in a [SAML 2.0 -// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). -func EnduserScope(val string) attribute.KeyValue { - return EnduserScopeKey.String(val) -} - -// These attributes allow to report this unit of code and therefore to provide -// more context about the span. -const ( - // CodeColumnKey is the attribute Key conforming to the "code.column" - // semantic conventions. It represents the column number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 16 - CodeColumnKey = attribute.Key("code.column") - - // CodeFilepathKey is the attribute Key conforming to the "code.filepath" - // semantic conventions. It represents the source code file name that - // identifies the code unit as uniquely as possible (preferably an absolute - // file path). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/usr/local/MyApplication/content_root/app/index.php' - CodeFilepathKey = attribute.Key("code.filepath") - - // CodeFunctionKey is the attribute Key conforming to the "code.function" - // semantic conventions. It represents the method or function name, or - // equivalent (usually rightmost part of the code unit's name). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'serveRequest' - CodeFunctionKey = attribute.Key("code.function") - - // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" - // semantic conventions. It represents the line number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - CodeLineNumberKey = attribute.Key("code.lineno") - - // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" - // semantic conventions. It represents the "namespace" within which - // `code.function` is defined. Usually the qualified class or module name, - // such that `code.namespace` + some separator + `code.function` form a - // unique identifier for the code unit. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'com.example.MyHTTPService' - CodeNamespaceKey = attribute.Key("code.namespace") - - // CodeStacktraceKey is the attribute Key conforming to the - // "code.stacktrace" semantic conventions. It represents a stacktrace as a - // string in the natural representation for the language runtime. The - // representation is to be determined and documented by each language SIG. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'at - // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' - // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' - // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' - CodeStacktraceKey = attribute.Key("code.stacktrace") -) - -// CodeColumn returns an attribute KeyValue conforming to the "code.column" -// semantic conventions. It represents the column number in `code.filepath` -// best representing the operation. It SHOULD point within the code unit named -// in `code.function`. -func CodeColumn(val int) attribute.KeyValue { - return CodeColumnKey.Int(val) -} - -// CodeFilepath returns an attribute KeyValue conforming to the -// "code.filepath" semantic conventions. It represents the source code file -// name that identifies the code unit as uniquely as possible (preferably an -// absolute file path). -func CodeFilepath(val string) attribute.KeyValue { - return CodeFilepathKey.String(val) -} - -// CodeFunction returns an attribute KeyValue conforming to the -// "code.function" semantic conventions. It represents the method or function -// name, or equivalent (usually rightmost part of the code unit's name). -func CodeFunction(val string) attribute.KeyValue { - return CodeFunctionKey.String(val) -} - -// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" -// semantic conventions. It represents the line number in `code.filepath` best -// representing the operation. It SHOULD point within the code unit named in -// `code.function`. 
-func CodeLineNumber(val int) attribute.KeyValue { - return CodeLineNumberKey.Int(val) -} - -// CodeNamespace returns an attribute KeyValue conforming to the -// "code.namespace" semantic conventions. It represents the "namespace" within -// which `code.function` is defined. Usually the qualified class or module -// name, such that `code.namespace` + some separator + `code.function` form a -// unique identifier for the code unit. -func CodeNamespace(val string) attribute.KeyValue { - return CodeNamespaceKey.String(val) -} - -// CodeStacktrace returns an attribute KeyValue conforming to the -// "code.stacktrace" semantic conventions. It represents a stacktrace as a -// string in the natural representation for the language runtime. The -// representation is to be determined and documented by each language SIG. -func CodeStacktrace(val string) attribute.KeyValue { - return CodeStacktraceKey.String(val) -} - -// These attributes may be used for any operation to store information about a -// thread that started a span. -const ( - // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic - // conventions. It represents the current "managed" thread ID (as opposed - // to OS thread ID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - ThreadIDKey = attribute.Key("thread.id") - - // ThreadNameKey is the attribute Key conforming to the "thread.name" - // semantic conventions. It represents the current thread name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'main' - ThreadNameKey = attribute.Key("thread.name") -) - -// ThreadID returns an attribute KeyValue conforming to the "thread.id" -// semantic conventions. It represents the current "managed" thread ID (as -// opposed to OS thread ID). -func ThreadID(val int) attribute.KeyValue { - return ThreadIDKey.Int(val) -} - -// ThreadName returns an attribute KeyValue conforming to the "thread.name" -// semantic conventions. It represents the current thread name. -func ThreadName(val string) attribute.KeyValue { - return ThreadNameKey.String(val) -} - -// Span attributes used by AWS Lambda (in addition to general `faas` -// attributes). -const ( - // AWSLambdaInvokedARNKey is the attribute Key conforming to the - // "aws.lambda.invoked_arn" semantic conventions. It represents the full - // invoked ARN as provided on the `Context` passed to the function - // (`Lambda-Runtime-Invoked-Function-ARN` header on the - // `/runtime/invocation/next` applicable). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' - // Note: This may be different from `cloud.resource_id` if an alias is - // involved. - AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") -) - -// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the -// "aws.lambda.invoked_arn" semantic conventions. It represents the full -// invoked ARN as provided on the `Context` passed to the function -// (`Lambda-Runtime-Invoked-Function-ARN` header on the -// `/runtime/invocation/next` applicable). -func AWSLambdaInvokedARN(val string) attribute.KeyValue { - return AWSLambdaInvokedARNKey.String(val) -} - -// Attributes for CloudEvents. CloudEvents is a specification on how to define -// event data in a standard way. These attributes can be attached to spans when -// performing operations with CloudEvents, regardless of the protocol being -// used. 
-const ( - // CloudeventsEventIDKey is the attribute Key conforming to the - // "cloudevents.event_id" semantic conventions. It represents the - // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) - // uniquely identifies the event. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' - CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") - - // CloudeventsEventSourceKey is the attribute Key conforming to the - // "cloudevents.event_source" semantic conventions. It represents the - // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) - // identifies the context in which an event happened. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'https://github.com/cloudevents', - // '/cloudevents/spec/pull/123', 'my-service' - CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") - - // CloudeventsEventSpecVersionKey is the attribute Key conforming to the - // "cloudevents.event_spec_version" semantic conventions. It represents the - // [version of the CloudEvents - // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) - // which the event uses. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.0' - CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") - - // CloudeventsEventSubjectKey is the attribute Key conforming to the - // "cloudevents.event_subject" semantic conventions. It represents the - // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) - // of the event in the context of the event producer (identified by - // source). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mynewfile.jpg' - CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") - - // CloudeventsEventTypeKey is the attribute Key conforming to the - // "cloudevents.event_type" semantic conventions. It represents the - // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) - // contains a value describing the type of event related to the originating - // occurrence. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'com.github.pull_request.opened', - // 'com.example.object.deleted.v2' - CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") -) - -// CloudeventsEventID returns an attribute KeyValue conforming to the -// "cloudevents.event_id" semantic conventions. It represents the -// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) -// uniquely identifies the event. -func CloudeventsEventID(val string) attribute.KeyValue { - return CloudeventsEventIDKey.String(val) -} - -// CloudeventsEventSource returns an attribute KeyValue conforming to the -// "cloudevents.event_source" semantic conventions. It represents the -// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) -// identifies the context in which an event happened. -func CloudeventsEventSource(val string) attribute.KeyValue { - return CloudeventsEventSourceKey.String(val) -} - -// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to -// the "cloudevents.event_spec_version" semantic conventions. 
It represents the -// [version of the CloudEvents -// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) -// which the event uses. -func CloudeventsEventSpecVersion(val string) attribute.KeyValue { - return CloudeventsEventSpecVersionKey.String(val) -} - -// CloudeventsEventSubject returns an attribute KeyValue conforming to the -// "cloudevents.event_subject" semantic conventions. It represents the -// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) -// of the event in the context of the event producer (identified by source). -func CloudeventsEventSubject(val string) attribute.KeyValue { - return CloudeventsEventSubjectKey.String(val) -} - -// CloudeventsEventType returns an attribute KeyValue conforming to the -// "cloudevents.event_type" semantic conventions. It represents the -// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) -// contains a value describing the type of event related to the originating -// occurrence. -func CloudeventsEventType(val string) attribute.KeyValue { - return CloudeventsEventTypeKey.String(val) -} - -// Semantic conventions for the OpenTracing Shim -const ( - // OpentracingRefTypeKey is the attribute Key conforming to the - // "opentracing.ref_type" semantic conventions. It represents the - // parent-child Reference type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The causal relationship between a child Span and a parent Span. - OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") -) - -var ( - // The parent Span depends on the child Span in some capacity - OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") - // The parent Span doesn't depend in any way on the result of the child Span - OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") -) - -// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's -// concepts. -const ( - // OTelStatusCodeKey is the attribute Key conforming to the - // "otel.status_code" semantic conventions. It represents the name of the - // code, either "OK" or "ERROR". MUST NOT be set if the status code is - // UNSET. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - OTelStatusCodeKey = attribute.Key("otel.status_code") - - // OTelStatusDescriptionKey is the attribute Key conforming to the - // "otel.status_description" semantic conventions. It represents the - // description of the Status if it has a value, otherwise not set. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'resource not found' - OTelStatusDescriptionKey = attribute.Key("otel.status_description") -) - -var ( - // The operation has been validated by an Application developer or Operator to have completed successfully - OTelStatusCodeOk = OTelStatusCodeKey.String("OK") - // The operation contains an error - OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") -) - -// OTelStatusDescription returns an attribute KeyValue conforming to the -// "otel.status_description" semantic conventions. It represents the -// description of the Status if it has a value, otherwise not set. 
-func OTelStatusDescription(val string) attribute.KeyValue { - return OTelStatusDescriptionKey.String(val) -} - -// This semantic convention describes an instance of a function that runs -// without provisioning or managing of servers (also known as serverless -// functions or Function as a Service (FaaS)) with spans. -const ( - // FaaSInvocationIDKey is the attribute Key conforming to the - // "faas.invocation_id" semantic conventions. It represents the invocation - // ID of the current function invocation. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' - FaaSInvocationIDKey = attribute.Key("faas.invocation_id") -) - -// FaaSInvocationID returns an attribute KeyValue conforming to the -// "faas.invocation_id" semantic conventions. It represents the invocation ID -// of the current function invocation. -func FaaSInvocationID(val string) attribute.KeyValue { - return FaaSInvocationIDKey.String(val) -} - -// Semantic Convention for FaaS triggered as a response to some data source -// operation such as a database or filesystem read/write. -const ( - // FaaSDocumentCollectionKey is the attribute Key conforming to the - // "faas.document.collection" semantic conventions. It represents the name - // of the source on which the triggering operation was performed. For - // example, in Cloud Storage or S3 corresponds to the bucket name, and in - // Cosmos DB to the database name. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'myBucketName', 'myDBName' - FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") - - // FaaSDocumentNameKey is the attribute Key conforming to the - // "faas.document.name" semantic conventions. It represents the document - // name/table subjected to the operation. For example, in Cloud Storage or - // S3 is the name of the file, and in Cosmos DB the table name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myFile.txt', 'myTableName' - FaaSDocumentNameKey = attribute.Key("faas.document.name") - - // FaaSDocumentOperationKey is the attribute Key conforming to the - // "faas.document.operation" semantic conventions. It represents the - // describes the type of the operation that was performed on the data. - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - FaaSDocumentOperationKey = attribute.Key("faas.document.operation") - - // FaaSDocumentTimeKey is the attribute Key conforming to the - // "faas.document.time" semantic conventions. It represents a string - // containing the time when the data was accessed in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format - // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2020-01-23T13:47:06Z' - FaaSDocumentTimeKey = attribute.Key("faas.document.time") -) - -var ( - // When a new object is created - FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") - // When an object is modified - FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") - // When an object is deleted - FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") -) - -// FaaSDocumentCollection returns an attribute KeyValue conforming to the -// "faas.document.collection" semantic conventions. 
It represents the name of -// the source on which the triggering operation was performed. For example, in -// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the -// database name. -func FaaSDocumentCollection(val string) attribute.KeyValue { - return FaaSDocumentCollectionKey.String(val) -} - -// FaaSDocumentName returns an attribute KeyValue conforming to the -// "faas.document.name" semantic conventions. It represents the document -// name/table subjected to the operation. For example, in Cloud Storage or S3 -// is the name of the file, and in Cosmos DB the table name. -func FaaSDocumentName(val string) attribute.KeyValue { - return FaaSDocumentNameKey.String(val) -} - -// FaaSDocumentTime returns an attribute KeyValue conforming to the -// "faas.document.time" semantic conventions. It represents a string containing -// the time when the data was accessed in the [ISO -// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format -// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). -func FaaSDocumentTime(val string) attribute.KeyValue { - return FaaSDocumentTimeKey.String(val) -} - -// Semantic Convention for FaaS scheduled to be executed regularly. -const ( - // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic - // conventions. It represents a string containing the schedule period as - // [Cron - // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0/5 * * * ? *' - FaaSCronKey = attribute.Key("faas.cron") - - // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic - // conventions. It represents a string containing the function invocation - // time in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format - // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2020-01-23T13:47:06Z' - FaaSTimeKey = attribute.Key("faas.time") -) - -// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" -// semantic conventions. It represents a string containing the schedule period -// as [Cron -// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). -func FaaSCron(val string) attribute.KeyValue { - return FaaSCronKey.String(val) -} - -// FaaSTime returns an attribute KeyValue conforming to the "faas.time" -// semantic conventions. It represents a string containing the function -// invocation time in the [ISO -// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format -// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). -func FaaSTime(val string) attribute.KeyValue { - return FaaSTimeKey.String(val) -} - -// Contains additional attributes for incoming FaaS spans. -const ( - // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" - // semantic conventions. It represents a boolean that is true if the - // serverless function is executed for the first time (aka cold-start). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - FaaSColdstartKey = attribute.Key("faas.coldstart") -) - -// FaaSColdstart returns an attribute KeyValue conforming to the -// "faas.coldstart" semantic conventions. It represents a boolean that is true -// if the serverless function is executed for the first time (aka cold-start). 
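In the same spirit, a datasource-triggered function handler could attach the faas.document.* and faas.coldstart attributes when its span starts. The sketch below is illustrative only: the bucket and key names are placeholders, the operation is hard-coded to "insert", and the keys are again spelled out rather than imported from a pinned semconv version.

package example

import (
	"context"
	"time"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

// startDocumentTriggerSpan records a span for a function invoked by an object
// write, using the faas.document.* and faas.coldstart attributes defined above.
func startDocumentTriggerSpan(ctx context.Context, bucket, key string, coldStart bool) (context.Context, trace.Span) {
	tracer := otel.Tracer("faas-handler") // placeholder instrumentation name
	return tracer.Start(ctx, "handle-document",
		trace.WithAttributes(
			attribute.Key("faas.document.collection").String(bucket),
			attribute.Key("faas.document.name").String(key),
			attribute.Key("faas.document.operation").String("insert"),
			// ISO 8601 / RFC 3339 timestamp in UTC, as the convention requires.
			attribute.Key("faas.document.time").String(time.Now().UTC().Format(time.RFC3339)),
			attribute.Key("faas.coldstart").Bool(coldStart),
		),
	)
}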
-func FaaSColdstart(val bool) attribute.KeyValue { - return FaaSColdstartKey.Bool(val) -} - -// The `aws` conventions apply to operations using the AWS SDK. They map -// request or response parameters in AWS SDK API calls to attributes on a Span. -// The conventions have been collected over time based on feedback from AWS -// users of tracing and will continue to evolve as new interesting conventions -// are found. -// Some descriptions are also provided for populating general OpenTelemetry -// semantic conventions based on these APIs. -const ( - // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" - // semantic conventions. It represents the AWS request ID as returned in - // the response headers `x-amz-request-id` or `x-amz-requestid`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' - AWSRequestIDKey = attribute.Key("aws.request_id") -) - -// AWSRequestID returns an attribute KeyValue conforming to the -// "aws.request_id" semantic conventions. It represents the AWS request ID as -// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. -func AWSRequestID(val string) attribute.KeyValue { - return AWSRequestIDKey.String(val) -} - -// Attributes that exist for multiple DynamoDB request types. -const ( - // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the - // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the - // value of the `AttributesToGet` request parameter. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'lives', 'id' - AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") - - // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the - // "aws.dynamodb.consistent_read" semantic conventions. It represents the - // value of the `ConsistentRead` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") - - // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the - // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the - // JSON-serialized value of each item in the `ConsumedCapacity` response - // field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { - // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number }, "TableName": "string", - // "WriteCapacityUnits": number }' - AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") - - // AWSDynamoDBIndexNameKey is the attribute Key conforming to the - // "aws.dynamodb.index_name" semantic conventions. It represents the value - // of the `IndexName` request parameter. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'name_to_group' - AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") - - // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to - // the "aws.dynamodb.item_collection_metrics" semantic conventions. It - // represents the JSON-serialized value of the `ItemCollectionMetrics` - // response field. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": - // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { - // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], - // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, - // "SizeEstimateRangeGB": [ number ] } ] }' - AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") - - // AWSDynamoDBLimitKey is the attribute Key conforming to the - // "aws.dynamodb.limit" semantic conventions. It represents the value of - // the `Limit` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") - - // AWSDynamoDBProjectionKey is the attribute Key conforming to the - // "aws.dynamodb.projection" semantic conventions. It represents the value - // of the `ProjectionExpression` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Title', 'Title, Price, Color', 'Title, Description, - // RelatedItems, ProductReviews' - AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") - - // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to - // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It - // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` - // request parameter. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") - - // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming - // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. - // It represents the value of the - // `ProvisionedThroughput.WriteCapacityUnits` request parameter. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") - - // AWSDynamoDBSelectKey is the attribute Key conforming to the - // "aws.dynamodb.select" semantic conventions. It represents the value of - // the `Select` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ALL_ATTRIBUTES', 'COUNT' - AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") - - // AWSDynamoDBTableNamesKey is the attribute Key conforming to the - // "aws.dynamodb.table_names" semantic conventions. It represents the keys - // in the `RequestItems` object field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Users', 'Cats' - AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") -) - -// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to -// the "aws.dynamodb.attributes_to_get" semantic conventions. 
It represents the -// value of the `AttributesToGet` request parameter. -func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributesToGetKey.StringSlice(val) -} - -// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the -// "aws.dynamodb.consistent_read" semantic conventions. It represents the value -// of the `ConsistentRead` request parameter. -func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { - return AWSDynamoDBConsistentReadKey.Bool(val) -} - -// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to -// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the -// JSON-serialized value of each item in the `ConsumedCapacity` response field. -func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { - return AWSDynamoDBConsumedCapacityKey.StringSlice(val) -} - -// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the -// "aws.dynamodb.index_name" semantic conventions. It represents the value of -// the `IndexName` request parameter. -func AWSDynamoDBIndexName(val string) attribute.KeyValue { - return AWSDynamoDBIndexNameKey.String(val) -} - -// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming -// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It -// represents the JSON-serialized value of the `ItemCollectionMetrics` response -// field. -func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { - return AWSDynamoDBItemCollectionMetricsKey.String(val) -} - -// AWSDynamoDBLimit returns an attribute KeyValue conforming to the -// "aws.dynamodb.limit" semantic conventions. It represents the value of the -// `Limit` request parameter. -func AWSDynamoDBLimit(val int) attribute.KeyValue { - return AWSDynamoDBLimitKey.Int(val) -} - -// AWSDynamoDBProjection returns an attribute KeyValue conforming to the -// "aws.dynamodb.projection" semantic conventions. It represents the value of -// the `ProjectionExpression` request parameter. -func AWSDynamoDBProjection(val string) attribute.KeyValue { - return AWSDynamoDBProjectionKey.String(val) -} - -// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.ReadCapacityUnits` request parameter. -func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) -} - -// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.WriteCapacityUnits` request parameter. -func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) -} - -// AWSDynamoDBSelect returns an attribute KeyValue conforming to the -// "aws.dynamodb.select" semantic conventions. It represents the value of the -// `Select` request parameter. -func AWSDynamoDBSelect(val string) attribute.KeyValue { - return AWSDynamoDBSelectKey.String(val) -} - -// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_names" semantic conventions. It represents the keys in -// the `RequestItems` object field. 
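One possible shape for client-side DynamoDB instrumentation built on the attributes above: record the request-side attribute (aws.dynamodb.table_names) when the span starts and the response-side one (aws.dynamodb.consumed_capacity) once the call returns. The wrapped call, its signature, and every name in this sketch are hypothetical.

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

// traceBatchGetItem wraps a DynamoDB BatchGetItem call with span attributes
// mirroring AWSDynamoDBTableNames and AWSDynamoDBConsumedCapacity above.
func traceBatchGetItem(ctx context.Context, tables []string, do func(context.Context) ([]string, error)) error {
	tracer := otel.Tracer("dynamodb-client") // placeholder instrumentation name
	ctx, span := tracer.Start(ctx, "DynamoDB.BatchGetItem",
		trace.WithSpanKind(trace.SpanKindClient),
		trace.WithAttributes(attribute.Key("aws.dynamodb.table_names").StringSlice(tables)),
	)
	defer span.End()

	// The callback stands in for the real SDK call and returns the
	// JSON-serialized ConsumedCapacity entries from the response.
	consumed, err := do(ctx)
	if err == nil {
		span.SetAttributes(attribute.Key("aws.dynamodb.consumed_capacity").StringSlice(consumed))
	}
	return err
}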
-func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { - return AWSDynamoDBTableNamesKey.StringSlice(val) -} - -// DynamoDB.CreateTable -const ( - // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It - // represents the JSON-serialized value of each item of the - // `GlobalSecondaryIndexes` request field - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": - // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ - // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { - // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") - - // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It - // represents the JSON-serialized value of each item of the - // `LocalSecondaryIndexes` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "IndexARN": "string", "IndexName": "string", - // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' - AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") -) - -// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_indexes" semantic -// conventions. It represents the JSON-serialized value of each item of the -// `GlobalSecondaryIndexes` request field -func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) -} - -// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming -// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It -// represents the JSON-serialized value of each item of the -// `LocalSecondaryIndexes` request field. -func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) -} - -// DynamoDB.ListTables -const ( - // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the - // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents - // the value of the `ExclusiveStartTableName` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Users', 'CatsTable' - AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") - - // AWSDynamoDBTableCountKey is the attribute Key conforming to the - // "aws.dynamodb.table_count" semantic conventions. It represents the the - // number of items in the `TableNames` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 20 - AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") -) - -// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming -// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It -// represents the value of the `ExclusiveStartTableName` request parameter. 
-func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { - return AWSDynamoDBExclusiveStartTableKey.String(val) -} - -// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_count" semantic conventions. It represents the the -// number of items in the `TableNames` response parameter. -func AWSDynamoDBTableCount(val int) attribute.KeyValue { - return AWSDynamoDBTableCountKey.Int(val) -} - -// DynamoDB.Query -const ( - // AWSDynamoDBScanForwardKey is the attribute Key conforming to the - // "aws.dynamodb.scan_forward" semantic conventions. It represents the - // value of the `ScanIndexForward` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") -) - -// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the -// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of -// the `ScanIndexForward` request parameter. -func AWSDynamoDBScanForward(val bool) attribute.KeyValue { - return AWSDynamoDBScanForwardKey.Bool(val) -} - -// DynamoDB.Scan -const ( - // AWSDynamoDBCountKey is the attribute Key conforming to the - // "aws.dynamodb.count" semantic conventions. It represents the value of - // the `Count` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") - - // AWSDynamoDBScannedCountKey is the attribute Key conforming to the - // "aws.dynamodb.scanned_count" semantic conventions. It represents the - // value of the `ScannedCount` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 50 - AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") - - // AWSDynamoDBSegmentKey is the attribute Key conforming to the - // "aws.dynamodb.segment" semantic conventions. It represents the value of - // the `Segment` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") - - // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the - // "aws.dynamodb.total_segments" semantic conventions. It represents the - // value of the `TotalSegments` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 100 - AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") -) - -// AWSDynamoDBCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.count" semantic conventions. It represents the value of the -// `Count` response parameter. -func AWSDynamoDBCount(val int) attribute.KeyValue { - return AWSDynamoDBCountKey.Int(val) -} - -// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.scanned_count" semantic conventions. It represents the value -// of the `ScannedCount` response parameter. -func AWSDynamoDBScannedCount(val int) attribute.KeyValue { - return AWSDynamoDBScannedCountKey.Int(val) -} - -// AWSDynamoDBSegment returns an attribute KeyValue conforming to the -// "aws.dynamodb.segment" semantic conventions. It represents the value of the -// `Segment` request parameter. 
-func AWSDynamoDBSegment(val int) attribute.KeyValue { - return AWSDynamoDBSegmentKey.Int(val) -} - -// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the -// "aws.dynamodb.total_segments" semantic conventions. It represents the value -// of the `TotalSegments` request parameter. -func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { - return AWSDynamoDBTotalSegmentsKey.Int(val) -} - -// DynamoDB.UpdateTable -const ( - // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to - // the "aws.dynamodb.attribute_definitions" semantic conventions. It - // represents the JSON-serialized value of each item in the - // `AttributeDefinitions` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' - AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") - - // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key - // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic - // conventions. It represents the JSON-serialized value of each item in the - // the `GlobalSecondaryIndexUpdates` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, - // "ProvisionedThroughput": { "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") -) - -// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming -// to the "aws.dynamodb.attribute_definitions" semantic conventions. It -// represents the JSON-serialized value of each item in the -// `AttributeDefinitions` request field. -func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) -} - -// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic -// conventions. It represents the JSON-serialized value of each item in the the -// `GlobalSecondaryIndexUpdates` request field. -func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) -} - -// Attributes that exist for S3 request types. -const ( - // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" - // semantic conventions. It represents the S3 bucket name the request - // refers to. Corresponds to the `--bucket` parameter of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // operations. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'some-bucket-name' - // Note: The `bucket` attribute is applicable to all S3 operations that - // reference a bucket, i.e. that require the bucket name as a mandatory - // parameter. - // This applies to almost all S3 operations except `list-buckets`. - AWSS3BucketKey = attribute.Key("aws.s3.bucket") - - // AWSS3CopySourceKey is the attribute Key conforming to the - // "aws.s3.copy_source" semantic conventions. It represents the source - // object (in the form `bucket`/`key`) for the copy operation. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'someFile.yml' - // Note: The `copy_source` attribute applies to S3 copy operations and - // corresponds to the `--copy-source` parameter - // of the [copy-object operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). - // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") - - // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" - // semantic conventions. It represents the delete request container that - // specifies the objects to be deleted. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' - // Note: The `delete` attribute is only applicable to the - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // operation. - // The `delete` attribute corresponds to the `--delete` parameter of the - // [delete-objects operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). - AWSS3DeleteKey = attribute.Key("aws.s3.delete") - - // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic - // conventions. It represents the S3 object key the request refers to. - // Corresponds to the `--key` parameter of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // operations. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'someFile.yml' - // Note: The `key` attribute is applicable to all object-related S3 - // operations, i.e. that require the object key as a mandatory parameter. 
- // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // - - // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) - // - - // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) - // - - // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) - // - - // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) - // - - // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3KeyKey = attribute.Key("aws.s3.key") - - // AWSS3PartNumberKey is the attribute Key conforming to the - // "aws.s3.part_number" semantic conventions. It represents the part number - // of the part being uploaded in a multipart-upload operation. This is a - // positive integer between 1 and 10,000. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3456 - // Note: The `part_number` attribute is only applicable to the - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // and - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - // operations. - // The `part_number` attribute corresponds to the `--part-number` parameter - // of the - // [upload-part operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). - AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") - - // AWSS3UploadIDKey is the attribute Key conforming to the - // "aws.s3.upload_id" semantic conventions. It represents the upload ID - // that identifies the multipart upload. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' - // Note: The `upload_id` attribute applies to S3 multipart-upload - // operations and corresponds to the `--upload-id` parameter - // of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // multipart operations. 
- // This applies in particular to the following operations: - // - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") -) - -// AWSS3Bucket returns an attribute KeyValue conforming to the -// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the -// request refers to. Corresponds to the `--bucket` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Bucket(val string) attribute.KeyValue { - return AWSS3BucketKey.String(val) -} - -// AWSS3CopySource returns an attribute KeyValue conforming to the -// "aws.s3.copy_source" semantic conventions. It represents the source object -// (in the form `bucket`/`key`) for the copy operation. -func AWSS3CopySource(val string) attribute.KeyValue { - return AWSS3CopySourceKey.String(val) -} - -// AWSS3Delete returns an attribute KeyValue conforming to the -// "aws.s3.delete" semantic conventions. It represents the delete request -// container that specifies the objects to be deleted. -func AWSS3Delete(val string) attribute.KeyValue { - return AWSS3DeleteKey.String(val) -} - -// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" -// semantic conventions. It represents the S3 object key the request refers to. -// Corresponds to the `--key` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Key(val string) attribute.KeyValue { - return AWSS3KeyKey.String(val) -} - -// AWSS3PartNumber returns an attribute KeyValue conforming to the -// "aws.s3.part_number" semantic conventions. It represents the part number of -// the part being uploaded in a multipart-upload operation. This is a positive -// integer between 1 and 10,000. -func AWSS3PartNumber(val int) attribute.KeyValue { - return AWSS3PartNumberKey.Int(val) -} - -// AWSS3UploadID returns an attribute KeyValue conforming to the -// "aws.s3.upload_id" semantic conventions. It represents the upload ID that -// identifies the multipart upload. -func AWSS3UploadID(val string) attribute.KeyValue { - return AWSS3UploadIDKey.String(val) -} - -// Semantic conventions to apply when instrumenting the GraphQL implementation. -// They map GraphQL operations to attributes on a Span. -const ( - // GraphqlDocumentKey is the attribute Key conforming to the - // "graphql.document" semantic conventions. It represents the GraphQL - // document being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'query findBookByID { bookByID(id: ?) { name } }' - // Note: The value may be sanitized to exclude sensitive information. - GraphqlDocumentKey = attribute.Key("graphql.document") - - // GraphqlOperationNameKey is the attribute Key conforming to the - // "graphql.operation.name" semantic conventions. It represents the name of - // the operation being executed. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'findBookByID' - GraphqlOperationNameKey = attribute.Key("graphql.operation.name") - - // GraphqlOperationTypeKey is the attribute Key conforming to the - // "graphql.operation.type" semantic conventions. It represents the type of - // the operation being executed. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'query', 'mutation', 'subscription' - GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") -) - -var ( - // GraphQL query - GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") - // GraphQL mutation - GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") - // GraphQL subscription - GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") -) - -// GraphqlDocument returns an attribute KeyValue conforming to the -// "graphql.document" semantic conventions. It represents the GraphQL document -// being executed. -func GraphqlDocument(val string) attribute.KeyValue { - return GraphqlDocumentKey.String(val) -} - -// GraphqlOperationName returns an attribute KeyValue conforming to the -// "graphql.operation.name" semantic conventions. It represents the name of the -// operation being executed. -func GraphqlOperationName(val string) attribute.KeyValue { - return GraphqlOperationNameKey.String(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md new file mode 100644 index 00000000000..2de1fc3c6be --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.26.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.26.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.26.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go new file mode 100644 index 00000000000..d8dc822b263 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go @@ -0,0 +1,8996 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +import "go.opentelemetry.io/otel/attribute" + +// The Android platform on which the Android application is running. +const ( + // AndroidOSAPILevelKey is the attribute Key conforming to the + // "android.os.api_level" semantic conventions. It represents the uniquely + // identifies the framework API revision offered by a version + // (`os.version`) of the android operating system. More information can be + // found + // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '33', '32' + AndroidOSAPILevelKey = attribute.Key("android.os.api_level") +) + +// AndroidOSAPILevel returns an attribute KeyValue conforming to the +// "android.os.api_level" semantic conventions. It represents the uniquely +// identifies the framework API revision offered by a version (`os.version`) of +// the android operating system. More information can be found +// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). 
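This constructor, like every other helper in the new v1.26.0 file, can be passed anywhere an attribute.KeyValue is accepted. One plausible home for an OS-level property such as the API level is the SDK resource; the sketch below assumes the go.opentelemetry.io/otel/sdk/resource package and leaves the schema URL out for brevity.

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
	"go.opentelemetry.io/otel/sdk/resource"
)

// newAndroidResource builds a schemaless resource describing the Android
// runtime; apiLevel would typically come from the device, e.g. "33".
func newAndroidResource(apiLevel string) *resource.Resource {
	return resource.NewSchemaless(
		semconv.AndroidOSAPILevel(apiLevel),
	)
}

Whether this attribute ends up on the resource or on individual spans is a choice left to the instrumentation; the constructor itself is agnostic.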
+func AndroidOSAPILevel(val string) attribute.KeyValue { + return AndroidOSAPILevelKey.String(val) +} + +// ASP.NET Core attributes +const ( + // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the + // "aspnetcore.rate_limiting.result" semantic conventions. It represents + // the rate-limiting result, shows whether the lease was acquired or + // contains a rejection reason + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Examples: 'acquired', 'request_canceled' + AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result") + + // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to + // the "aspnetcore.diagnostics.handler.type" semantic conventions. It + // represents the full type name of the + // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) + // implementation that handled the exception. + // + // Type: string + // RequirementLevel: ConditionallyRequired (if and only if the exception + // was handled by this handler.) + // Stability: stable + // Examples: 'Contoso.MyHandler' + AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type") + + // AspnetcoreDiagnosticsExceptionResultKey is the attribute Key conforming + // to the "aspnetcore.diagnostics.exception.result" semantic conventions. + // It represents the aSP.NET Core exception middleware handling result + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'handled', 'unhandled' + AspnetcoreDiagnosticsExceptionResultKey = attribute.Key("aspnetcore.diagnostics.exception.result") + + // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the + // "aspnetcore.rate_limiting.policy" semantic conventions. It represents + // the rate limiting policy name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'fixed', 'sliding', 'token' + AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy") + + // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the + // "aspnetcore.request.is_unhandled" semantic conventions. It represents + // the flag indicating if request was handled by the application pipeline. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Examples: True + AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled") + + // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the + // "aspnetcore.routing.is_fallback" semantic conventions. It represents a + // value that indicates whether the matched route is a fallback route. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Examples: True + AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback") + + // AspnetcoreRoutingMatchStatusKey is the attribute Key conforming to the + // "aspnetcore.routing.match_status" semantic conventions. 
It represents + // the match result - success or failure + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'success', 'failure' + AspnetcoreRoutingMatchStatusKey = attribute.Key("aspnetcore.routing.match_status") +) + +var ( + // Lease was acquired + AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired") + // Lease request was rejected by the endpoint limiter + AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter") + // Lease request was rejected by the global limiter + AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter") + // Lease request was canceled + AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled") +) + +var ( + // Exception was handled by the exception handling middleware + AspnetcoreDiagnosticsExceptionResultHandled = AspnetcoreDiagnosticsExceptionResultKey.String("handled") + // Exception was not handled by the exception handling middleware + AspnetcoreDiagnosticsExceptionResultUnhandled = AspnetcoreDiagnosticsExceptionResultKey.String("unhandled") + // Exception handling was skipped because the response had started + AspnetcoreDiagnosticsExceptionResultSkipped = AspnetcoreDiagnosticsExceptionResultKey.String("skipped") + // Exception handling didn't run because the request was aborted + AspnetcoreDiagnosticsExceptionResultAborted = AspnetcoreDiagnosticsExceptionResultKey.String("aborted") +) + +var ( + // Match succeeded + AspnetcoreRoutingMatchStatusSuccess = AspnetcoreRoutingMatchStatusKey.String("success") + // Match failed + AspnetcoreRoutingMatchStatusFailure = AspnetcoreRoutingMatchStatusKey.String("failure") +) + +// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming +// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It +// represents the full type name of the +// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) +// implementation that handled the exception. +func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue { + return AspnetcoreDiagnosticsHandlerTypeKey.String(val) +} + +// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to +// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents +// the rate limiting policy name. +func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue { + return AspnetcoreRateLimitingPolicyKey.String(val) +} + +// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to +// the "aspnetcore.request.is_unhandled" semantic conventions. It represents +// the flag indicating if request was handled by the application pipeline. +func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue { + return AspnetcoreRequestIsUnhandledKey.Bool(val) +} + +// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to +// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a +// value that indicates whether the matched route is a fallback route. +func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue { + return AspnetcoreRoutingIsFallbackKey.Bool(val) +} + +// Generic attributes for AWS services. +const ( + // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" + // semantic conventions. 
It represents the AWS request ID as returned in + // the response headers `x-amz-request-id` or `x-amz-requestid`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AWSRequestIDKey = attribute.Key("aws.request_id") +) + +// AWSRequestID returns an attribute KeyValue conforming to the +// "aws.request_id" semantic conventions. It represents the AWS request ID as +// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// Attributes for AWS DynamoDB. +const ( + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to + // the "aws.dynamodb.attribute_definitions" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `AttributeDefinitions` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents + // the value of the `ExclusiveStartTableName` request parameter. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Users', 'CatsTable' + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key + // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic + // conventions. It represents the JSON-serialized value of each item in the + // `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") + + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `GlobalSecondaryIndexes` request field + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `LocalSecondaryIndexes` request field. 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexARN": "string", "IndexName": "string", + // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") + + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the + // number of items in the `TableNames` response parameter. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") + + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") +) + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming +// to the "aws.dynamodb.attribute_definitions" semantic conventions. It +// represents the JSON-serialized value of each item in the +// `AttributeDefinitions` request field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming +// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It +// represents the value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. It represents the JSON-serialized value of each item in the +// `GlobalSecondaryIndexUpdates` request field. 
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_indexes" semantic +// conventions. It represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. +func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming +// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `LocalSecondaryIndexes` request field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. 
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the number of +// items in the `TableNames` response parameter. +func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// Attributes for AWS Elastic Container Service (ECS). +const ( + // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" + // semantic conventions. It represents the ID of a running ECS task. The ID + // MUST be extracted from `task.arn`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If and only if `task.arn` is + // populated.) + // Stability: experimental + // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b', + // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS + // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container + // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of a + // running [ECS + // task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b', + // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the family + // name of the [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) + // used to create the ECS task. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for the task definition used to create the ECS task. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSTaskID returns an attribute KeyValue conforming to the +// "aws.ecs.task.id" semantic conventions. It represents the ID of a running +// ECS task. The ID MUST be extracted from `task.arn`. +func AWSECSTaskID(val string) attribute.KeyValue { + return AWSECSTaskIDKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running +// [ECS +// task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the family name of +// the [ECS task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) +// used to create the ECS task. 
+func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// the task definition used to create the ECS task. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Attributes for AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Attributes for AWS Logs. +const ( + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, and each write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) + // of the AWS log stream(s) an application is writing to. 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") +) + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// Attributes for AWS Lambda. +const ( + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. It represents the full + // invoked ARN as provided on the `Context` passed to the function + // (`Lambda-Runtime-Invoked-Function-ARN` header on the + // `/runtime/invocation/next` applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `cloud.resource_id` if an alias is + // involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the +// "aws.lambda.invoked_arn" semantic conventions. It represents the full +// invoked ARN as provided on the `Context` passed to the function +// (`Lambda-Runtime-Invoked-Function-ARN` header on the +// `/runtime/invocation/next` applicable). +func AWSLambdaInvokedARN(val string) attribute.KeyValue { + return AWSLambdaInvokedARNKey.String(val) +} + +// Attributes for AWS S3. +const ( + // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" + // semantic conventions. It represents the S3 bucket name the request + // refers to. Corresponds to the `--bucket` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'some-bucket-name' + // Note: The `bucket` attribute is applicable to all S3 operations that + // reference a bucket, i.e. that require the bucket name as a mandatory + // parameter. + // This applies to almost all S3 operations except `list-buckets`. + AWSS3BucketKey = attribute.Key("aws.s3.bucket") + + // AWSS3CopySourceKey is the attribute Key conforming to the + // "aws.s3.copy_source" semantic conventions. It represents the source + // object (in the form `bucket`/`key`) for the copy operation. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' + // Note: The `delete` attribute is only applicable to the + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic + // conventions. It represents the S3 object key the request refers to. + // Corresponds to the `--key` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `key` attribute is applicable to all object-related S3 + // operations, i.e. that require the object key as a mandatory parameter. 
+ // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // - + // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) + // - + // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) + // - + // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) + // - + // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) + // - + // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number + // of the part being uploaded in a multipart-upload operation. This is a + // positive integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // and + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + // operations. + // The `part_number` attribute corresponds to the `--part-number` parameter + // of the + // [upload-part operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") + + // AWSS3UploadIDKey is the attribute Key conforming to the + // "aws.s3.upload_id" semantic conventions. It represents the upload ID + // that identifies the multipart upload. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The `upload_id` attribute applies to S3 multipart-upload + // operations and corresponds to the `--upload-id` parameter + // of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // multipart operations. 
+ // This applies in particular to the following operations: + // + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") +) + +// AWSS3Bucket returns an attribute KeyValue conforming to the +// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the +// request refers to. Corresponds to the `--bucket` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object +// (in the form `bucket`/`key`) for the copy operation. +func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the +// "aws.s3.delete" semantic conventions. It represents the delete request +// container that specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" +// semantic conventions. It represents the S3 object key the request refers to. +// Corresponds to the `--key` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Key(val string) attribute.KeyValue { + return AWSS3KeyKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// The web browser attributes +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. 
It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// These attributes may be used to describe the client in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). 
This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // ClientAddressKey is the attribute Key conforming to the "client.address" + // semantic conventions. It represents the client address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix + // domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the server side, and when communicating through + // an intermediary, `client.address` SHOULD represent the client address + // behind any intermediaries, for example proxies, if it's available. + ClientAddressKey = attribute.Key("client.address") + + // ClientPortKey is the attribute Key conforming to the "client.port" + // semantic conventions. It represents the client port number. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + // Note: When observed from the server side, and when communicating through + // an intermediary, `client.port` SHOULD represent the client port behind + // any intermediaries, for example proxies, if it's available. + ClientPortKey = attribute.Key("client.port") +) + +// ClientAddress returns an attribute KeyValue conforming to the +// "client.address" semantic conventions. It represents the client address - +// domain name if available without reverse DNS lookup; otherwise, IP address +// or Unix domain socket name. +func ClientAddress(val string) attribute.KeyValue { + return ClientAddressKey.String(val) +} + +// ClientPort returns an attribute KeyValue conforming to the "client.port" +// semantic conventions. It represents the client port number. +func ClientPort(val int) attribute.KeyValue { + return ClientPortKey.Int(val) +} + +// A cloud environment (e.g. GCP, Azure, AWS). +const ( + // CloudAccountIDKey is the attribute Key conforming to the + // "cloud.account.id" semantic conventions. It represents the cloud account + // ID the resource is assigned to. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '111111111111', 'opentelemetry' + CloudAccountIDKey = attribute.Key("cloud.account.id") + + // CloudAvailabilityZoneKey is the attribute Key conforming to the + // "cloud.availability_zone" semantic conventions. It represents the cloud + // regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the + // resource is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and Google + // Cloud. + CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") + + // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" + // semantic conventions. It represents the cloud platform in use. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`. 
+ CloudPlatformKey = attribute.Key("cloud.platform") + + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" + // semantic conventions. It represents the geographical region the resource + // is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for + // example [Alibaba Cloud + // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS + // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), + // [Azure + // regions](https://azure.microsoft.com/global-infrastructure/geographies/), + // [Google Cloud regions](https://cloud.google.com/about/locations), or + // [Tencent Cloud + // regions](https://www.tencentcloud.com/document/product/213/6091). + CloudRegionKey = attribute.Key("cloud.region") + + // CloudResourceIDKey is the attribute Key conforming to the + // "cloud.resource_id" semantic conventions. It represents the cloud + // provider-specific native identifier of the monitored cloud resource + // (e.g. an + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // on AWS, a [fully qualified resource + // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) + // on Azure, a [full resource + // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) + // on GCP) + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', + // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', + // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' + // Note: On some cloud providers, it may not be possible to determine the + // full ID at startup, + // so it may be necessary to set `cloud.resource_id` as a span attribute + // instead. + // + // The exact value to use for `cloud.resource_id` depends on the cloud + // provider. + // The following well-known definitions MUST be used if you set this + // attribute and they apply: + // + // * **AWS Lambda:** The function + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + // Take care not to use the "invoked ARN" directly but replace any + // [alias + // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) + // with the resolved function version, as the same runtime instance may + // be invokable with + // multiple different aliases. + // * **GCP:** The [URI of the + // resource](https://cloud.google.com/iam/docs/full-resource-names) + // * **Azure:** The [Fully Qualified Resource + // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id) + // of the invoked function, + // *not* the function app, having the form + // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider. 
+ CloudResourceIDKey = attribute.Key("cloud.resource_id") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Apps + CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google Bare Metal Solution (BMS) + CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") + // Red Hat OpenShift on IBM Cloud + CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") + // Tencent Cloud Cloud Virtual Machine (CVM) + CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // Heroku Platform as a Service + CloudProviderHeroku = CloudProviderKey.String("heroku") + 
// IBM Cloud + CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") + // Tencent Cloud + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +// CloudAccountID returns an attribute KeyValue conforming to the +// "cloud.account.id" semantic conventions. It represents the cloud account ID +// the resource is assigned to. +func CloudAccountID(val string) attribute.KeyValue { + return CloudAccountIDKey.String(val) +} + +// CloudAvailabilityZone returns an attribute KeyValue conforming to the +// "cloud.availability_zone" semantic conventions. It represents the cloud +// regions often have multiple, isolated locations known as zones to increase +// availability. Availability zone represents the zone where the resource is +// running. +func CloudAvailabilityZone(val string) attribute.KeyValue { + return CloudAvailabilityZoneKey.String(val) +} + +// CloudRegion returns an attribute KeyValue conforming to the +// "cloud.region" semantic conventions. It represents the geographical region +// the resource is running. +func CloudRegion(val string) attribute.KeyValue { + return CloudRegionKey.String(val) +} + +// CloudResourceID returns an attribute KeyValue conforming to the +// "cloud.resource_id" semantic conventions. It represents the cloud +// provider-specific native identifier of the monitored cloud resource (e.g. an +// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) +// on AWS, a [fully qualified resource +// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on +// Azure, a [full resource +// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) +// on GCP) +func CloudResourceID(val string) attribute.KeyValue { + return CloudResourceIDKey.String(val) +} + +// Attributes for CloudEvents. +const ( + // CloudeventsEventIDKey is the attribute Key conforming to the + // "cloudevents.event_id" semantic conventions. It represents the + // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) + // uniquely identifies the event. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") + + // CloudeventsEventSourceKey is the attribute Key conforming to the + // "cloudevents.event_source" semantic conventions. It represents the + // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) + // identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'https://github.com/cloudevents', + // '/cloudevents/spec/pull/123', 'my-service' + CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") + + // CloudeventsEventSpecVersionKey is the attribute Key conforming to the + // "cloudevents.event_spec_version" semantic conventions. It represents the + // [version of the CloudEvents + // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) + // which the event uses. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.0' + CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + + // CloudeventsEventSubjectKey is the attribute Key conforming to the + // "cloudevents.event_subject" semantic conventions. 
It represents the + // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) + // of the event in the context of the event producer (identified by + // source). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mynewfile.jpg' + CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") + + // CloudeventsEventTypeKey is the attribute Key conforming to the + // "cloudevents.event_type" semantic conventions. It represents the + // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) + // contains a value describing the type of event related to the originating + // occurrence. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com.github.pull_request.opened', + // 'com.example.object.deleted.v2' + CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") +) + +// CloudeventsEventID returns an attribute KeyValue conforming to the +// "cloudevents.event_id" semantic conventions. It represents the +// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) +// uniquely identifies the event. +func CloudeventsEventID(val string) attribute.KeyValue { + return CloudeventsEventIDKey.String(val) +} + +// CloudeventsEventSource returns an attribute KeyValue conforming to the +// "cloudevents.event_source" semantic conventions. It represents the +// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) +// identifies the context in which an event happened. +func CloudeventsEventSource(val string) attribute.KeyValue { + return CloudeventsEventSourceKey.String(val) +} + +// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to +// the "cloudevents.event_spec_version" semantic conventions. It represents the +// [version of the CloudEvents +// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) +// which the event uses. +func CloudeventsEventSpecVersion(val string) attribute.KeyValue { + return CloudeventsEventSpecVersionKey.String(val) +} + +// CloudeventsEventSubject returns an attribute KeyValue conforming to the +// "cloudevents.event_subject" semantic conventions. It represents the +// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) +// of the event in the context of the event producer (identified by source). +func CloudeventsEventSubject(val string) attribute.KeyValue { + return CloudeventsEventSubjectKey.String(val) +} + +// CloudeventsEventType returns an attribute KeyValue conforming to the +// "cloudevents.event_type" semantic conventions. It represents the +// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) +// contains a value describing the type of event related to the originating +// occurrence. +func CloudeventsEventType(val string) attribute.KeyValue { + return CloudeventsEventTypeKey.String(val) +} + +// These attributes allow to report this unit of code and therefore to provide +// more context about the span. +const ( + // CodeColumnKey is the attribute Key conforming to the "code.column" + // semantic conventions. It represents the column number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") + + // CodeFilepathKey is the attribute Key conforming to the "code.filepath" + // semantic conventions. It represents the source code file name that + // identifies the code unit as uniquely as possible (preferably an absolute + // file path). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + + // CodeFunctionKey is the attribute Key conforming to the "code.function" + // semantic conventions. It represents the method or function name, or + // equivalent (usually rightmost part of the code unit's name). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + + // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" + // semantic conventions. It represents the line number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") + + // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" + // semantic conventions. It represents the "namespace" within which + // `code.function` is defined. Usually the qualified class or module name, + // such that `code.namespace` + some separator + `code.function` form a + // unique identifier for the code unit. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + + // CodeStacktraceKey is the attribute Key conforming to the + // "code.stacktrace" semantic conventions. It represents a stacktrace as a + // string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'at + // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + CodeStacktraceKey = attribute.Key("code.stacktrace") +) + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions. It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). 
+func CodeFunction(val string) attribute.KeyValue { + return CodeFunctionKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" +// semantic conventions. It represents the line number in `code.filepath` best +// representing the operation. It SHOULD point within the code unit named in +// `code.function`. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeNamespace returns an attribute KeyValue conforming to the +// "code.namespace" semantic conventions. It represents the "namespace" within +// which `code.function` is defined. Usually the qualified class or module +// name, such that `code.namespace` + some separator + `code.function` form a +// unique identifier for the code unit. +func CodeNamespace(val string) attribute.KeyValue { + return CodeNamespaceKey.String(val) +} + +// CodeStacktrace returns an attribute KeyValue conforming to the +// "code.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func CodeStacktrace(val string) attribute.KeyValue { + return CodeStacktraceKey.String(val) +} + +// A container instance. +const ( + // ContainerCommandKey is the attribute Key conforming to the + // "container.command" semantic conventions. It represents the command used + // to run the container (i.e. the command name). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcontribcol' + // Note: If using embedded credentials or sensitive data, it is recommended + // to remove them to prevent potential leakage. + ContainerCommandKey = attribute.Key("container.command") + + // ContainerCommandArgsKey is the attribute Key conforming to the + // "container.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) run by the + // container. [2] + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcontribcol, --config, config.yaml' + ContainerCommandArgsKey = attribute.Key("container.command_args") + + // ContainerCommandLineKey is the attribute Key conforming to the + // "container.command_line" semantic conventions. It represents the full + // command run by the container as a single string representing the full + // command. [2] + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcontribcol --config config.yaml' + ContainerCommandLineKey = attribute.Key("container.command_line") + + // ContainerCPUStateKey is the attribute Key conforming to the + // "container.cpu.state" semantic conventions. It represents the CPU state + // for this data point. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'user', 'kernel' + ContainerCPUStateKey = attribute.Key("container.cpu.state") + + // ContainerIDKey is the attribute Key conforming to the "container.id" + // semantic conventions. It represents the container ID. Usually a UUID, as + // for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container-identification). + // The UUID might be abbreviated. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + + // ContainerImageIDKey is the attribute Key conforming to the + // "container.image.id" semantic conventions. It represents the runtime + // specific image identifier. Usually a hash algorithm followed by a UUID. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' + // Note: Docker defines a sha256 of the image id; `container.image.id` + // corresponds to the `Image` field from the Docker container inspect + // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect) + // endpoint. + // K8S defines a link to the container registry repository with digest + // `"imageID": "registry.azurecr.io + // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`. + // The ID is assigned by the container runtime and can vary in different + // environments. Consider using `oci.manifest.digest` if it is important to + // identify the same image in different environments/runtimes. + ContainerImageIDKey = attribute.Key("container.image.id") + + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. It represents the name of + // the image the container was built on. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + + // ContainerImageRepoDigestsKey is the attribute Key conforming to the + // "container.image.repo_digests" semantic conventions. It represents the + // repo digests of the container image as provided by the container + // runtime. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb', + // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578' + // Note: + // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect) + // and + // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238) + // report those under the `RepoDigests` field. + ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests") + + // ContainerImageTagsKey is the attribute Key conforming to the + // "container.image.tags" semantic conventions. It represents the container + // image tags. An example can be found in [Docker Image + // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). + // Should be only the `` section of the full name for example from + // `registry.example.com/my-org/my-image:`. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'v1.27.1', '3.5.7-0' + ContainerImageTagsKey = attribute.Key("container.image.tags") + + // ContainerNameKey is the attribute Key conforming to the "container.name" + // semantic conventions. It represents the container name used by container + // runtime. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + + // ContainerRuntimeKey is the attribute Key conforming to the + // "container.runtime" semantic conventions. It represents the container + // runtime managing this container. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") +) + +var ( + // When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows) + ContainerCPUStateUser = ContainerCPUStateKey.String("user") + // When CPU is used by the system (host OS) + ContainerCPUStateSystem = ContainerCPUStateKey.String("system") + // When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows) + ContainerCPUStateKernel = ContainerCPUStateKey.String("kernel") +) + +// ContainerCommand returns an attribute KeyValue conforming to the +// "container.command" semantic conventions. It represents the command used to +// run the container (i.e. the command name). +func ContainerCommand(val string) attribute.KeyValue { + return ContainerCommandKey.String(val) +} + +// ContainerCommandArgs returns an attribute KeyValue conforming to the +// "container.command_args" semantic conventions. It represents the all the +// command arguments (including the command/executable itself) run by the +// container. [2] +func ContainerCommandArgs(val ...string) attribute.KeyValue { + return ContainerCommandArgsKey.StringSlice(val) +} + +// ContainerCommandLine returns an attribute KeyValue conforming to the +// "container.command_line" semantic conventions. It represents the full +// command run by the container as a single string representing the full +// command. [2] +func ContainerCommandLine(val string) attribute.KeyValue { + return ContainerCommandLineKey.String(val) +} + +// ContainerID returns an attribute KeyValue conforming to the +// "container.id" semantic conventions. It represents the container ID. Usually +// a UUID, as for example used to [identify Docker +// containers](https://docs.docker.com/engine/reference/run/#container-identification). +// The UUID might be abbreviated. +func ContainerID(val string) attribute.KeyValue { + return ContainerIDKey.String(val) +} + +// ContainerImageID returns an attribute KeyValue conforming to the +// "container.image.id" semantic conventions. It represents the runtime +// specific image identifier. Usually a hash algorithm followed by a UUID. +func ContainerImageID(val string) attribute.KeyValue { + return ContainerImageIDKey.String(val) +} + +// ContainerImageName returns an attribute KeyValue conforming to the +// "container.image.name" semantic conventions. It represents the name of the +// image the container was built on. +func ContainerImageName(val string) attribute.KeyValue { + return ContainerImageNameKey.String(val) +} + +// ContainerImageRepoDigests returns an attribute KeyValue conforming to the +// "container.image.repo_digests" semantic conventions. It represents the repo +// digests of the container image as provided by the container runtime. +func ContainerImageRepoDigests(val ...string) attribute.KeyValue { + return ContainerImageRepoDigestsKey.StringSlice(val) +} + +// ContainerImageTags returns an attribute KeyValue conforming to the +// "container.image.tags" semantic conventions. 
It represents the container +// image tags. An example can be found in [Docker Image +// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). +// Should be only the `<tag>` section of the full name for example from +// `registry.example.com/my-org/my-image:<tag>`. +func ContainerImageTags(val ...string) attribute.KeyValue { + return ContainerImageTagsKey.StringSlice(val) +} + +// ContainerName returns an attribute KeyValue conforming to the +// "container.name" semantic conventions. It represents the container name used +// by container runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerRuntime returns an attribute KeyValue conforming to the +// "container.runtime" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntime(val string) attribute.KeyValue { + return ContainerRuntimeKey.String(val) +} + +// This group defines the attributes used to describe telemetry in the context +// of databases. +const ( + // DBClientConnectionsPoolNameKey is the attribute Key conforming to the + // "db.client.connections.pool.name" semantic conventions. It represents + // the name of the connection pool; unique within the instrumented + // application. In case the connection pool implementation doesn't provide + // a name, instrumentation should use a combination of `server.address` and + // `server.port` attributes formatted as `server.address:server.port`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myDataSource' + DBClientConnectionsPoolNameKey = attribute.Key("db.client.connections.pool.name") + + // DBClientConnectionsStateKey is the attribute Key conforming to the + // "db.client.connections.state" semantic conventions. It represents the + // state of a connection in the pool + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'idle' + DBClientConnectionsStateKey = attribute.Key("db.client.connections.state") + + // DBCollectionNameKey is the attribute Key conforming to the + // "db.collection.name" semantic conventions. It represents the name of a + // collection (table, container) within the database. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'public.users', 'customers' + // Note: If the collection name is parsed from the query, it SHOULD match + // the value provided in the query and may be qualified with the schema and + // database name. + // It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + DBCollectionNameKey = attribute.Key("db.collection.name") + + // DBNamespaceKey is the attribute Key conforming to the "db.namespace" + // semantic conventions. It represents the name of the database, fully + // qualified within the server address and port. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'customers', 'test.users' + // Note: If a database system has multiple namespace components, they + // SHOULD be concatenated (potentially using database system specific + // conventions) from most general to most specific namespace component, and + // more specific namespaces SHOULD NOT be captured without the more general + // namespaces, to ensure that "startswith" queries for the more general + // namespaces will be valid.
+ // Semantic conventions for individual database systems SHOULD document + // what `db.namespace` means in the context of that system. + // It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + DBNamespaceKey = attribute.Key("db.namespace") + + // DBOperationNameKey is the attribute Key conforming to the + // "db.operation.name" semantic conventions. It represents the name of the + // operation or command being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: It is RECOMMENDED to capture the value as provided by the + // application without attempting to do any case normalization. + DBOperationNameKey = attribute.Key("db.operation.name") + + // DBQueryTextKey is the attribute Key conforming to the "db.query.text" + // semantic conventions. It represents the database query being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'SELECT * FROM wuser_table where username = ?', 'SET mykey + // "WuValue"' + DBQueryTextKey = attribute.Key("db.query.text") + + // DBSystemKey is the attribute Key conforming to the "db.system" semantic + // conventions. It represents the database management system (DBMS) product + // as identified by the client instrumentation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The actual DBMS may differ from the one identified by the client. + // For example, when using PostgreSQL client libraries to connect to a + // CockroachDB, the `db.system` is set to `postgresql` based on the + // instrumentation's best knowledge. + DBSystemKey = attribute.Key("db.system") +) + +var ( + // idle + DBClientConnectionsStateIdle = DBClientConnectionsStateKey.String("idle") + // used + DBClientConnectionsStateUsed = DBClientConnectionsStateKey.String("used") +) + +var ( + // Some other SQL database. Fallback only. 
See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // Microsoft SQL Server Compact + DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") + // Cloud Spanner + DBSystemSpanner = DBSystemKey.String("spanner") + // Trino + DBSystemTrino = DBSystemKey.String("trino") +) + +// DBClientConnectionsPoolName returns 
an attribute KeyValue conforming to +// the "db.client.connections.pool.name" semantic conventions. It represents +// the name of the connection pool; unique within the instrumented application. +// In case the connection pool implementation doesn't provide a name, +// instrumentation should use a combination of `server.address` and +// `server.port` attributes formatted as `server.address:server.port`. +func DBClientConnectionsPoolName(val string) attribute.KeyValue { + return DBClientConnectionsPoolNameKey.String(val) +} + +// DBCollectionName returns an attribute KeyValue conforming to the +// "db.collection.name" semantic conventions. It represents the name of a +// collection (table, container) within the database. +func DBCollectionName(val string) attribute.KeyValue { + return DBCollectionNameKey.String(val) +} + +// DBNamespace returns an attribute KeyValue conforming to the +// "db.namespace" semantic conventions. It represents the name of the database, +// fully qualified within the server address and port. +func DBNamespace(val string) attribute.KeyValue { + return DBNamespaceKey.String(val) +} + +// DBOperationName returns an attribute KeyValue conforming to the +// "db.operation.name" semantic conventions. It represents the name of the +// operation or command being executed. +func DBOperationName(val string) attribute.KeyValue { + return DBOperationNameKey.String(val) +} + +// DBQueryText returns an attribute KeyValue conforming to the +// "db.query.text" semantic conventions. It represents the database query being +// executed. +func DBQueryText(val string) attribute.KeyValue { + return DBQueryTextKey.String(val) +} + +// This group defines attributes for Cassandra. +const ( + // DBCassandraConsistencyLevelKey is the attribute Key conforming to the + // "db.cassandra.consistency_level" semantic conventions. It represents the + // consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") + + // DBCassandraCoordinatorDCKey is the attribute Key conforming to the + // "db.cassandra.coordinator.dc" semantic conventions. It represents the + // data center of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-west-2' + DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") + + // DBCassandraCoordinatorIDKey is the attribute Key conforming to the + // "db.cassandra.coordinator.id" semantic conventions. It represents the ID + // of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") + + // DBCassandraIdempotenceKey is the attribute Key conforming to the + // "db.cassandra.idempotence" semantic conventions. It represents the + // whether or not the query is idempotent. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") + + // DBCassandraPageSizeKey is the attribute Key conforming to the + // "db.cassandra.page_size" semantic conventions. It represents the fetch + // size used for paging, i.e. 
how many rows will be returned at once. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 5000 + DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + + // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming + // to the "db.cassandra.speculative_execution_count" semantic conventions. + // It represents the number of times a query was speculatively executed. + // Not set or `0` if the query was not executed speculatively. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 2 + DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") +) + +var ( + // all + DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") + // each_quorum + DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") + // quorum + DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") + // local_quorum + DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") + // one + DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") + // two + DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") + // three + DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") + // local_one + DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") + // any + DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") + // serial + DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") + // local_serial + DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") +) + +// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.dc" semantic conventions. It represents the data +// center of the coordinating node for a query. +func DBCassandraCoordinatorDC(val string) attribute.KeyValue { + return DBCassandraCoordinatorDCKey.String(val) +} + +// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of +// the coordinating node for a query. +func DBCassandraCoordinatorID(val string) attribute.KeyValue { + return DBCassandraCoordinatorIDKey.String(val) +} + +// DBCassandraIdempotence returns an attribute KeyValue conforming to the +// "db.cassandra.idempotence" semantic conventions. It represents the whether +// or not the query is idempotent. +func DBCassandraIdempotence(val bool) attribute.KeyValue { + return DBCassandraIdempotenceKey.Bool(val) +} + +// DBCassandraPageSize returns an attribute KeyValue conforming to the +// "db.cassandra.page_size" semantic conventions. It represents the fetch size +// used for paging, i.e. how many rows will be returned at once. +func DBCassandraPageSize(val int) attribute.KeyValue { + return DBCassandraPageSizeKey.Int(val) +} + +// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue +// conforming to the "db.cassandra.speculative_execution_count" semantic +// conventions. It represents the number of times a query was speculatively +// executed. Not set or `0` if the query was not executed speculatively. 
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return DBCassandraSpeculativeExecutionCountKey.Int(val) +} + +// This group defines attributes for Azure Cosmos DB. +const ( + // DBCosmosDBClientIDKey is the attribute Key conforming to the + // "db.cosmosdb.client_id" semantic conventions. It represents the unique + // Cosmos client instance id. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") + + // DBCosmosDBConnectionModeKey is the attribute Key conforming to the + // "db.cosmosdb.connection_mode" semantic conventions. It represents the + // cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") + + // DBCosmosDBOperationTypeKey is the attribute Key conforming to the + // "db.cosmosdb.operation_type" semantic conventions. It represents the + // cosmosDB Operation Type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") + + // DBCosmosDBRequestChargeKey is the attribute Key conforming to the + // "db.cosmosdb.request_charge" semantic conventions. It represents the rU + // consumed for that operation + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 46.18, 1.0 + DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") + + // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the + // "db.cosmosdb.request_content_length" semantic conventions. It represents + // the request payload size in bytes + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") + + // DBCosmosDBStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos + // DB status code. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 200, 201 + DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") + + // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.sub_status_code" semantic conventions. It represents the + // cosmos DB sub status code. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1000, 1002 + DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") +) + +var ( + // Gateway (HTTP) connections mode + DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") + // Direct connection + DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") +) + +var ( + // invalid + DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") + // create + DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") + // patch + DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") + // read + DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") + // read_feed + DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") + // delete + DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") + // replace + DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") + // execute + DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") + // query + DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") + // head + DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") + // head_feed + DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") + // upsert + DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") + // batch + DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") + // query_plan + DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") + // execute_javascript + DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") +) + +// DBCosmosDBClientID returns an attribute KeyValue conforming to the +// "db.cosmosdb.client_id" semantic conventions. It represents the unique +// Cosmos client instance id. +func DBCosmosDBClientID(val string) attribute.KeyValue { + return DBCosmosDBClientIDKey.String(val) +} + +// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the +// "db.cosmosdb.request_charge" semantic conventions. It represents the rU +// consumed for that operation +func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { + return DBCosmosDBRequestChargeKey.Float64(val) +} + +// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming +// to the "db.cosmosdb.request_content_length" semantic conventions. It +// represents the request payload size in bytes +func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { + return DBCosmosDBRequestContentLengthKey.Int(val) +} + +// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB +// status code. +func DBCosmosDBStatusCode(val int) attribute.KeyValue { + return DBCosmosDBStatusCodeKey.Int(val) +} + +// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos +// DB sub status code. +func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { + return DBCosmosDBSubStatusCodeKey.Int(val) +} + +// This group defines attributes for Elasticsearch. +const ( + // DBElasticsearchClusterNameKey is the attribute Key conforming to the + // "db.elasticsearch.cluster.name" semantic conventions. 
It represents the + // represents the identifier of an Elasticsearch cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f' + DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name") + + // DBElasticsearchNodeNameKey is the attribute Key conforming to the + // "db.elasticsearch.node.name" semantic conventions. It represents the + // represents the human-readable identifier of the node/instance to which a + // request was routed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'instance-0000000001' + DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name") +) + +// DBElasticsearchClusterName returns an attribute KeyValue conforming to +// the "db.elasticsearch.cluster.name" semantic conventions. It represents the +// represents the identifier of an Elasticsearch cluster. +func DBElasticsearchClusterName(val string) attribute.KeyValue { + return DBElasticsearchClusterNameKey.String(val) +} + +// DBElasticsearchNodeName returns an attribute KeyValue conforming to the +// "db.elasticsearch.node.name" semantic conventions. It represents the +// represents the human-readable identifier of the node/instance to which a +// request was routed. +func DBElasticsearchNodeName(val string) attribute.KeyValue { + return DBElasticsearchNodeNameKey.String(val) +} + +// Attributes for software deployments. +const ( + // DeploymentEnvironmentKey is the attribute Key conforming to the + // "deployment.environment" semantic conventions. It represents the name of + // the [deployment + // environment](https://wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'staging', 'production' + // Note: `deployment.environment` does not affect the uniqueness + // constraints defined through + // the `service.namespace`, `service.name` and `service.instance.id` + // resource attributes. + // This implies that resources carrying the following attribute + // combinations MUST be + // considered to be identifying the same service: + // + // * `service.name=frontend`, `deployment.environment=production` + // * `service.name=frontend`, `deployment.environment=staging`. + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// DeploymentEnvironment returns an attribute KeyValue conforming to the +// "deployment.environment" semantic conventions. It represents the name of the +// [deployment environment](https://wikipedia.org/wiki/Deployment_environment) +// (aka deployment tier). +func DeploymentEnvironment(val string) attribute.KeyValue { + return DeploymentEnvironmentKey.String(val) +} + +// Attributes that represents an occurrence of a lifecycle transition on the +// Android platform. +const ( + // AndroidStateKey is the attribute Key conforming to the "android.state" + // semantic conventions. It represents the deprecated use the + // `device.app.lifecycle` event definition including `android.state` as a + // payload field instead. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The Android lifecycle states are defined in [Activity lifecycle + // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc), + // and from which the `OS identifiers` are derived. 
+ AndroidStateKey = attribute.Key("android.state") +) + +var ( + // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time + AndroidStateCreated = AndroidStateKey.String("created") + // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state + AndroidStateBackground = AndroidStateKey.String("background") + // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states + AndroidStateForeground = AndroidStateKey.String("foreground") +) + +// These attributes may be used to describe the receiver of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // DestinationAddressKey is the attribute Key conforming to the + // "destination.address" semantic conventions. It represents the + // destination address - domain name if available without reverse DNS + // lookup; otherwise, IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the source side, and when communicating through + // an intermediary, `destination.address` SHOULD represent the destination + // address behind any intermediaries, for example proxies, if it's + // available. + DestinationAddressKey = attribute.Key("destination.address") + + // DestinationPortKey is the attribute Key conforming to the + // "destination.port" semantic conventions. It represents the destination + // port number + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3389, 2888 + DestinationPortKey = attribute.Key("destination.port") +) + +// DestinationAddress returns an attribute KeyValue conforming to the +// "destination.address" semantic conventions. It represents the destination +// address - domain name if available without reverse DNS lookup; otherwise, IP +// address or Unix domain socket name. +func DestinationAddress(val string) attribute.KeyValue { + return DestinationAddressKey.String(val) +} + +// DestinationPort returns an attribute KeyValue conforming to the +// "destination.port" semantic conventions. It represents the destination port +// number +func DestinationPort(val int) attribute.KeyValue { + return DestinationPortKey.Int(val) +} + +// Describes device attributes. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. 
On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine-readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human-readable version of + // the device model rather than a machine-readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name +// for the device model +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// These attributes may be used for any disk related operation. 
+const ( + // DiskIoDirectionKey is the attribute Key conforming to the + // "disk.io.direction" semantic conventions. It represents the disk IO + // operation direction. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'read' + DiskIoDirectionKey = attribute.Key("disk.io.direction") +) + +var ( + // read + DiskIoDirectionRead = DiskIoDirectionKey.String("read") + // write + DiskIoDirectionWrite = DiskIoDirectionKey.String("write") +) + +// The shared attributes used to report a DNS query. +const ( + // DNSQuestionNameKey is the attribute Key conforming to the + // "dns.question.name" semantic conventions. It represents the name being + // queried. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'www.example.com', 'opentelemetry.io' + // Note: If the name field contains non-printable characters (below 32 or + // above 126), those characters should be represented as escaped base 10 + // integers (\DDD). Back slashes and quotes should be escaped. Tabs, + // carriage returns, and line feeds should be converted to \t, \r, and \n + // respectively. + DNSQuestionNameKey = attribute.Key("dns.question.name") +) + +// DNSQuestionName returns an attribute KeyValue conforming to the +// "dns.question.name" semantic conventions. It represents the name being +// queried. +func DNSQuestionName(val string) attribute.KeyValue { + return DNSQuestionNameKey.String(val) +} + +// Attributes for operations with an authenticated and/or authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. 
+func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} + +// EnduserRole returns an attribute KeyValue conforming to the +// "enduser.role" semantic conventions. It represents the actual/assumed role +// the client is making the request under extracted from token or application +// security context. +func EnduserRole(val string) attribute.KeyValue { + return EnduserRoleKey.String(val) +} + +// EnduserScope returns an attribute KeyValue conforming to the +// "enduser.scope" semantic conventions. It represents the scopes or granted +// authorities the client currently possesses extracted from token or +// application security context. The value would come from the scope associated +// with an [OAuth 2.0 Access +// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute +// value in a [SAML 2.0 +// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). +func EnduserScope(val string) attribute.KeyValue { + return EnduserScopeKey.String(val) +} + +// The shared attributes used to report an error. +const ( + // ErrorTypeKey is the attribute Key conforming to the "error.type" + // semantic conventions. It represents the describes a class of error the + // operation ended with. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'timeout', 'java.net.UnknownHostException', + // 'server_certificate_invalid', '500' + // Note: The `error.type` SHOULD be predictable, and SHOULD have low + // cardinality. + // + // When `error.type` is set to a type (e.g., an exception type), its + // canonical class name identifying the type within the artifact SHOULD be + // used. + // + // Instrumentations SHOULD document the list of errors they report. + // + // The cardinality of `error.type` within one instrumentation library + // SHOULD be low. + // Telemetry consumers that aggregate data from multiple instrumentation + // libraries and applications + // should be prepared for `error.type` to have high cardinality at query + // time when no + // additional filters are applied. + // + // If the operation has completed successfully, instrumentations SHOULD NOT + // set `error.type`. + // + // If a specific domain defines its own set of error identifiers (such as + // HTTP or gRPC status codes), + // it's RECOMMENDED to: + // + // * Use a domain-specific attribute + // * Set `error.type` to capture all errors, regardless of whether they are + // defined within the domain-specific set or not. + ErrorTypeKey = attribute.Key("error.type") +) + +var ( + // A fallback error value to be used when the instrumentation doesn't define a custom value + ErrorTypeOther = ErrorTypeKey.String("_OTHER") +) + +// Attributes for Events represented using Log Records. +const ( + // EventNameKey is the attribute Key conforming to the "event.name" + // semantic conventions. It represents the identifies the class / type of + // event. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'browser.mouse.click', 'device.app.lifecycle' + // Note: Event names are subject to the same rules as [attribute + // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.33.0/specification/common/attribute-naming.md). + // Notably, event names are namespaced to avoid collisions and provide a + // clean separation of semantics for events in separate domains like + // browser, mobile, and kubernetes. 
+ EventNameKey = attribute.Key("event.name") +) + +// EventName returns an attribute KeyValue conforming to the "event.name" +// semantic conventions. It represents the identifies the class / type of +// event. +func EventName(val string) attribute.KeyValue { + return EventNameKey.String(val) +} + +// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // ExceptionEscapedKey is the attribute Key conforming to the + // "exception.escaped" semantic conventions. It represents the sHOULD be + // set to true if the exception event is recorded at a point where it is + // known that the exception is escaping the scope of the span. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: An exception is considered to have escaped (or left) the scope of + // a span, + // if that span is ended while the exception is still logically "in + // flight". + // This may be actually "in flight" in some languages (e.g. if the + // exception + // is passed to a Context manager's `__exit__` method in Python) but will + // usually be caught at the point of recording the exception in most + // languages. + // + // It is usually not possible to determine at the point where an exception + // is thrown + // whether it will escape the scope of a span. + // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending + // the span, + // as done in the [example for recording span + // exceptions](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/#recording-an-exception). + // + // It follows that an exception may still escape the scope of the span + // even if the `exception.escaped` attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + ExceptionEscapedKey = attribute.Key("exception.escaped") + + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str + // implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. It represents a stacktrace + // as a string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") + + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the + // exception should be preferred over the static type in languages that + // support it. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + ExceptionTypeKey = attribute.Key("exception.type") +) + +// ExceptionEscaped returns an attribute KeyValue conforming to the +// "exception.escaped" semantic conventions. It represents the sHOULD be set to +// true if the exception event is recorded at a point where it is known that +// the exception is escaping the scope of the span. +func ExceptionEscaped(val bool) attribute.KeyValue { + return ExceptionEscapedKey.Bool(val) +} + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception +// message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// ExceptionType returns an attribute KeyValue conforming to the +// "exception.type" semantic conventions. It represents the type of the +// exception (its fully-qualified class name, if applicable). The dynamic type +// of the exception should be preferred over the static type in languages that +// support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// FaaS attributes +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the + // serverless function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + FaaSColdstartKey = attribute.Key("faas.coldstart") + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron + // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") + + // FaaSDocumentCollectionKey is the attribute Key conforming to the + // "faas.document.collection" semantic conventions. It represents the name + // of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in + // Cosmos DB to the database name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myBucketName', 'myDBName' + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + + // FaaSDocumentNameKey is the attribute Key conforming to the + // "faas.document.name" semantic conventions. It represents the document + // name/table subjected to the operation. For example, in Cloud Storage or + // S3 is the name of the file, and in Cosmos DB the table name. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myFile.txt', 'myTableName' + FaaSDocumentNameKey = attribute.Key("faas.document.name") + + // FaaSDocumentOperationKey is the attribute Key conforming to the + // "faas.document.operation" semantic conventions. It represents the + // describes the type of the operation that was performed on the data. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + + // FaaSDocumentTimeKey is the attribute Key conforming to the + // "faas.document.time" semantic conventions. It represents a string + // containing the time when the data was accessed in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + FaaSDocumentTimeKey = attribute.Key("faas.document.time") + + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a + // string, that will be potentially reused for other invocations to the + // same function/function version. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSInvocationIDKey is the attribute Key conforming to the + // "faas.invocation_id" semantic conventions. It represents the invocation + // ID of the current function invocation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + FaaSInvocationIDKey = attribute.Key("faas.invocation_id") + + // FaaSInvokedNameKey is the attribute Key conforming to the + // "faas.invoked_name" semantic conventions. It represents the name of the + // invoked function. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the + // invoked function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud + // region of the invoked function. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the + // invoked function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") + + // FaaSMaxMemoryKey is the attribute Key conforming to the + // "faas.max_memory" semantic conventions. It represents the amount of + // memory available to the serverless function converted to Bytes. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 134217728 + // Note: It's recommended to set this attribute since e.g. too little + // memory can easily stop a Java AWS Lambda function from working + // correctly. On AWS Lambda, the environment variable + // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must + // be multiplied by 1,048,576). + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") + + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. It represents the name of the single function that this + // runtime instance executes. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the + // FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes) + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. The + // following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud + // providers/products: + // + // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `cloud.resource_id` attribute). + FaaSNameKey = attribute.Key("faas.name") + + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation + // time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") + + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" + // semantic conventions. It represents the type of the trigger which caused + // this function invocation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + FaaSTriggerKey = attribute.Key("faas.trigger") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" + // semantic conventions. It represents the immutable version of the + // function being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + // + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run (Services):** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable.
Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the +// database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. 
It represents the execution +// environment ID as a string, that will be potentially reused for other +// invocations to the same function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSInvocationID returns an attribute KeyValue conforming to the +// "faas.invocation_id" semantic conventions. It represents the invocation ID +// of the current function invocation. +func FaaSInvocationID(val string) attribute.KeyValue { + return FaaSInvocationIDKey.String(val) +} + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region +// of the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function converted to Bytes. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" +// semantic conventions. It represents the name of the single function that +// this runtime instance executes. +func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" +// semantic conventions. It represents a string containing the function +// invocation time in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the +// "faas.version" semantic conventions. It represents the immutable version of +// the function being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// Attributes for Feature Flags. +const ( + // FeatureFlagKeyKey is the attribute Key conforming to the + // "feature_flag.key" semantic conventions. It represents the unique + // identifier of the feature flag. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'logo-color' + FeatureFlagKeyKey = attribute.Key("feature_flag.key") + + // FeatureFlagProviderNameKey is the attribute Key conforming to the + // "feature_flag.provider_name" semantic conventions. It represents the + // name of the service provider that performs the flag evaluation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Flag Manager' + FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") + + // FeatureFlagVariantKey is the attribute Key conforming to the + // "feature_flag.variant" semantic conventions. It represents the sHOULD be + // a semantic identifier for a value. If one is unavailable, a stringified + // version of the value can be used. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'red', 'true', 'on' + // Note: A semantic identifier, commonly referred to as a variant, provides + // a means + // for referring to a value without including the value itself. This can + // provide additional context for understanding the meaning behind a value. + // For example, the variant `red` maybe be used for the value `#c05543`. + // + // A stringified version of the value can be used in situations where a + // semantic identifier is unavailable. String representation of the value + // should be determined by the implementer. + FeatureFlagVariantKey = attribute.Key("feature_flag.variant") +) + +// FeatureFlagKey returns an attribute KeyValue conforming to the +// "feature_flag.key" semantic conventions. It represents the unique identifier +// of the feature flag. +func FeatureFlagKey(val string) attribute.KeyValue { + return FeatureFlagKeyKey.String(val) +} + +// FeatureFlagProviderName returns an attribute KeyValue conforming to the +// "feature_flag.provider_name" semantic conventions. It represents the name of +// the service provider that performs the flag evaluation. +func FeatureFlagProviderName(val string) attribute.KeyValue { + return FeatureFlagProviderNameKey.String(val) +} + +// FeatureFlagVariant returns an attribute KeyValue conforming to the +// "feature_flag.variant" semantic conventions. It represents the sHOULD be a +// semantic identifier for a value. If one is unavailable, a stringified +// version of the value can be used. +func FeatureFlagVariant(val string) attribute.KeyValue { + return FeatureFlagVariantKey.String(val) +} + +// Describes file attributes. +const ( + // FileDirectoryKey is the attribute Key conforming to the "file.directory" + // semantic conventions. It represents the directory where the file is + // located. It should include the drive letter, when appropriate. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/home/user', 'C:\\Program Files\\MyApp' + FileDirectoryKey = attribute.Key("file.directory") + + // FileExtensionKey is the attribute Key conforming to the "file.extension" + // semantic conventions. It represents the file extension, excluding the + // leading dot. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'png', 'gz' + // Note: When the file name has multiple extensions (example.tar.gz), only + // the last one should be captured ("gz", not "tar.gz"). + FileExtensionKey = attribute.Key("file.extension") + + // FileNameKey is the attribute Key conforming to the "file.name" semantic + // conventions. It represents the name of the file including the extension, + // without the directory. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'example.png' + FileNameKey = attribute.Key("file.name") + + // FilePathKey is the attribute Key conforming to the "file.path" semantic + // conventions. It represents the full path to the file, including the file + // name. It should include the drive letter, when appropriate. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/home/alice/example.png', 'C:\\Program + // Files\\MyApp\\myapp.exe' + FilePathKey = attribute.Key("file.path") + + // FileSizeKey is the attribute Key conforming to the "file.size" semantic + // conventions. It represents the file size in bytes. 
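// ---------------------------------------------------------------------------
// Editor's aside (illustrative only, not part of the vendored diff above): a
// minimal sketch of how the feature_flag.* helpers defined in this file might
// be used to annotate a span. The semconv import path/version and the tracer
// name "flags" are assumptions for the example, not taken from this patch.
// ---------------------------------------------------------------------------
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version path
)

// recordFlagEvaluation attaches the conventional feature-flag attributes to a
// span describing a single flag evaluation.
func recordFlagEvaluation(ctx context.Context, key, variant, provider string) {
	_, span := otel.Tracer("flags").Start(ctx, "feature_flag.evaluation")
	defer span.End()

	// Each helper returns an attribute.KeyValue carrying the conventional key.
	span.SetAttributes(
		semconv.FeatureFlagKey(key),               // e.g. "logo-color"
		semconv.FeatureFlagVariant(variant),       // e.g. "red"
		semconv.FeatureFlagProviderName(provider), // e.g. "Flag Manager"
	)
}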
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + FileSizeKey = attribute.Key("file.size") +) + +// FileDirectory returns an attribute KeyValue conforming to the +// "file.directory" semantic conventions. It represents the directory where the +// file is located. It should include the drive letter, when appropriate. +func FileDirectory(val string) attribute.KeyValue { + return FileDirectoryKey.String(val) +} + +// FileExtension returns an attribute KeyValue conforming to the +// "file.extension" semantic conventions. It represents the file extension, +// excluding the leading dot. +func FileExtension(val string) attribute.KeyValue { + return FileExtensionKey.String(val) +} + +// FileName returns an attribute KeyValue conforming to the "file.name" +// semantic conventions. It represents the name of the file including the +// extension, without the directory. +func FileName(val string) attribute.KeyValue { + return FileNameKey.String(val) +} + +// FilePath returns an attribute KeyValue conforming to the "file.path" +// semantic conventions. It represents the full path to the file, including the +// file name. It should include the drive letter, when appropriate. +func FilePath(val string) attribute.KeyValue { + return FilePathKey.String(val) +} + +// FileSize returns an attribute KeyValue conforming to the "file.size" +// semantic conventions. It represents the file size in bytes. +func FileSize(val int) attribute.KeyValue { + return FileSizeKey.Int(val) +} + +// Attributes for Google Cloud Run. +const ( + // GCPCloudRunJobExecutionKey is the attribute Key conforming to the + // "gcp.cloud_run.job.execution" semantic conventions. It represents the + // name of the Cloud Run + // [execution](https://cloud.google.com/run/docs/managing/job-executions) + // being run for the Job, as set by the + // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'job-name-xxxx', 'sample-job-mdw84' + GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") + + // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the + // "gcp.cloud_run.job.task_index" semantic conventions. It represents the + // index for a task within an execution as provided by the + // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1 + GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") +) + +// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.execution" semantic conventions. It represents the name +// of the Cloud Run +// [execution](https://cloud.google.com/run/docs/managing/job-executions) being +// run for the Job, as set by the +// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobExecution(val string) attribute.KeyValue { + return GCPCloudRunJobExecutionKey.String(val) +} + +// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.task_index" semantic conventions. 
It represents the index +// for a task within an execution as provided by the +// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { + return GCPCloudRunJobTaskIndexKey.Int(val) +} + +// Attributes for Google Compute Engine (GCE). +const ( + // GCPGceInstanceHostnameKey is the attribute Key conforming to the + // "gcp.gce.instance.hostname" semantic conventions. It represents the + // hostname of a GCE instance. This is the full value of the default or + // [custom + // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-host1234.example.com', + // 'sample-vm.us-west1-b.c.my-project.internal' + GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") + + // GCPGceInstanceNameKey is the attribute Key conforming to the + // "gcp.gce.instance.name" semantic conventions. It represents the instance + // name of a GCE instance. This is the value provided by `host.name`, the + // visible name of the instance in the Cloud Console UI, and the prefix for + // the default hostname of the instance as defined by the [default internal + // DNS + // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'instance-1', 'my-vm-name' + GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") +) + +// GCPGceInstanceHostname returns an attribute KeyValue conforming to the +// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname +// of a GCE instance. This is the full value of the default or [custom +// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). +func GCPGceInstanceHostname(val string) attribute.KeyValue { + return GCPGceInstanceHostnameKey.String(val) +} + +// GCPGceInstanceName returns an attribute KeyValue conforming to the +// "gcp.gce.instance.name" semantic conventions. It represents the instance +// name of a GCE instance. This is the value provided by `host.name`, the +// visible name of the instance in the Cloud Console UI, and the prefix for the +// default hostname of the instance as defined by the [default internal DNS +// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). +func GCPGceInstanceName(val string) attribute.KeyValue { + return GCPGceInstanceNameKey.String(val) +} + +// The attributes used to describe telemetry in the context of LLM (Large +// Language Models) requests and responses. +const ( + // GenAiCompletionKey is the attribute Key conforming to the + // "gen_ai.completion" semantic conventions. It represents the full + // response received from the LLM. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: "[{'role': 'assistant', 'content': 'The capital of France is + // Paris.'}]" + // Note: It's RECOMMENDED to format completions as JSON string matching + // [OpenAI messages + // format](https://platform.openai.com/docs/guides/text-generation) + GenAiCompletionKey = attribute.Key("gen_ai.completion") + + // GenAiPromptKey is the attribute Key conforming to the "gen_ai.prompt" + // semantic conventions. It represents the full prompt sent to an LLM. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: "[{'role': 'user', 'content': 'What is the capital of + // France?'}]" + // Note: It's RECOMMENDED to format prompts as JSON string matching [OpenAI + // messages + // format](https://platform.openai.com/docs/guides/text-generation) + GenAiPromptKey = attribute.Key("gen_ai.prompt") + + // GenAiRequestMaxTokensKey is the attribute Key conforming to the + // "gen_ai.request.max_tokens" semantic conventions. It represents the + // maximum number of tokens the LLM generates for a request. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + GenAiRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens") + + // GenAiRequestModelKey is the attribute Key conforming to the + // "gen_ai.request.model" semantic conventions. It represents the name of + // the LLM a request is being made to. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'gpt-4' + GenAiRequestModelKey = attribute.Key("gen_ai.request.model") + + // GenAiRequestTemperatureKey is the attribute Key conforming to the + // "gen_ai.request.temperature" semantic conventions. It represents the + // temperature setting for the LLM request. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0.0 + GenAiRequestTemperatureKey = attribute.Key("gen_ai.request.temperature") + + // GenAiRequestTopPKey is the attribute Key conforming to the + // "gen_ai.request.top_p" semantic conventions. It represents the top_p + // sampling setting for the LLM request. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0 + GenAiRequestTopPKey = attribute.Key("gen_ai.request.top_p") + + // GenAiResponseFinishReasonsKey is the attribute Key conforming to the + // "gen_ai.response.finish_reasons" semantic conventions. It represents the + // array of reasons the model stopped generating tokens, corresponding to + // each generation received. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'stop' + GenAiResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons") + + // GenAiResponseIDKey is the attribute Key conforming to the + // "gen_ai.response.id" semantic conventions. It represents the unique + // identifier for the completion. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'chatcmpl-123' + GenAiResponseIDKey = attribute.Key("gen_ai.response.id") + + // GenAiResponseModelKey is the attribute Key conforming to the + // "gen_ai.response.model" semantic conventions. It represents the name of + // the LLM a response was generated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'gpt-4-0613' + GenAiResponseModelKey = attribute.Key("gen_ai.response.model") + + // GenAiSystemKey is the attribute Key conforming to the "gen_ai.system" + // semantic conventions. It represents the Generative AI product as + // identified by the client instrumentation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'openai' + // Note: The actual GenAI product may differ from the one identified by the + // client. 
For example, when using OpenAI client libraries to communicate + // with Mistral, the `gen_ai.system` is set to `openai` based on the + // instrumentation's best knowledge. + GenAiSystemKey = attribute.Key("gen_ai.system") + + // GenAiUsageCompletionTokensKey is the attribute Key conforming to the + // "gen_ai.usage.completion_tokens" semantic conventions. It represents the + // number of tokens used in the LLM response (completion). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 180 + GenAiUsageCompletionTokensKey = attribute.Key("gen_ai.usage.completion_tokens") + + // GenAiUsagePromptTokensKey is the attribute Key conforming to the + // "gen_ai.usage.prompt_tokens" semantic conventions. It represents the + // number of tokens used in the LLM prompt. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + GenAiUsagePromptTokensKey = attribute.Key("gen_ai.usage.prompt_tokens") +) + +var ( + // OpenAI + GenAiSystemOpenai = GenAiSystemKey.String("openai") +) + +// GenAiCompletion returns an attribute KeyValue conforming to the +// "gen_ai.completion" semantic conventions. It represents the full response +// received from the LLM. +func GenAiCompletion(val string) attribute.KeyValue { + return GenAiCompletionKey.String(val) +} + +// GenAiPrompt returns an attribute KeyValue conforming to the +// "gen_ai.prompt" semantic conventions. It represents the full prompt sent to +// an LLM. +func GenAiPrompt(val string) attribute.KeyValue { + return GenAiPromptKey.String(val) +} + +// GenAiRequestMaxTokens returns an attribute KeyValue conforming to the +// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum +// number of tokens the LLM generates for a request. +func GenAiRequestMaxTokens(val int) attribute.KeyValue { + return GenAiRequestMaxTokensKey.Int(val) +} + +// GenAiRequestModel returns an attribute KeyValue conforming to the +// "gen_ai.request.model" semantic conventions. It represents the name of the +// LLM a request is being made to. +func GenAiRequestModel(val string) attribute.KeyValue { + return GenAiRequestModelKey.String(val) +} + +// GenAiRequestTemperature returns an attribute KeyValue conforming to the +// "gen_ai.request.temperature" semantic conventions. It represents the +// temperature setting for the LLM request. +func GenAiRequestTemperature(val float64) attribute.KeyValue { + return GenAiRequestTemperatureKey.Float64(val) +} + +// GenAiRequestTopP returns an attribute KeyValue conforming to the +// "gen_ai.request.top_p" semantic conventions. It represents the top_p +// sampling setting for the LLM request. +func GenAiRequestTopP(val float64) attribute.KeyValue { + return GenAiRequestTopPKey.Float64(val) +} + +// GenAiResponseFinishReasons returns an attribute KeyValue conforming to +// the "gen_ai.response.finish_reasons" semantic conventions. It represents the +// array of reasons the model stopped generating tokens, corresponding to each +// generation received. +func GenAiResponseFinishReasons(val ...string) attribute.KeyValue { + return GenAiResponseFinishReasonsKey.StringSlice(val) +} + +// GenAiResponseID returns an attribute KeyValue conforming to the +// "gen_ai.response.id" semantic conventions. It represents the unique +// identifier for the completion. 
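// ---------------------------------------------------------------------------
// Editor's aside (illustrative only, not part of the vendored diff): a minimal
// sketch, assuming the semconv v1.26.0 import path, of recording the gen_ai.*
// request attributes declared above on a client span. The tracer and span
// names are placeholders.
// ---------------------------------------------------------------------------
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version path
)

// annotateLLMCall records the request-side GenAI attributes on a span.
func annotateLLMCall(ctx context.Context, model string, temperature float64, maxTokens int) {
	_, span := otel.Tracer("llm-client").Start(ctx, "gen_ai.chat_completions")
	defer span.End()

	span.SetAttributes(
		semconv.GenAiSystemOpenai,                    // enum member defined above
		semconv.GenAiRequestModel(model),             // e.g. "gpt-4"
		semconv.GenAiRequestTemperature(temperature), // e.g. 0.0
		semconv.GenAiRequestMaxTokens(maxTokens),     // e.g. 100
		semconv.GenAiResponseFinishReasons("stop"),   // variadic, stored as a string slice
	)
}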
+func GenAiResponseID(val string) attribute.KeyValue { + return GenAiResponseIDKey.String(val) +} + +// GenAiResponseModel returns an attribute KeyValue conforming to the +// "gen_ai.response.model" semantic conventions. It represents the name of the +// LLM a response was generated from. +func GenAiResponseModel(val string) attribute.KeyValue { + return GenAiResponseModelKey.String(val) +} + +// GenAiUsageCompletionTokens returns an attribute KeyValue conforming to +// the "gen_ai.usage.completion_tokens" semantic conventions. It represents the +// number of tokens used in the LLM response (completion). +func GenAiUsageCompletionTokens(val int) attribute.KeyValue { + return GenAiUsageCompletionTokensKey.Int(val) +} + +// GenAiUsagePromptTokens returns an attribute KeyValue conforming to the +// "gen_ai.usage.prompt_tokens" semantic conventions. It represents the number +// of tokens used in the LLM prompt. +func GenAiUsagePromptTokens(val int) attribute.KeyValue { + return GenAiUsagePromptTokensKey.Int(val) +} + +// Attributes for GraphQL. +const ( + // GraphqlDocumentKey is the attribute Key conforming to the + // "graphql.document" semantic conventions. It represents the GraphQL + // document being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + GraphqlDocumentKey = attribute.Key("graphql.document") + + // GraphqlOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of + // the operation being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'findBookByID' + GraphqlOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphqlOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of + // the operation being executed. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'query', 'mutation', 'subscription' + GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") +) + +var ( + // GraphQL query + GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") + // GraphQL mutation + GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") + // GraphQL subscription + GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") +) + +// GraphqlDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphqlDocument(val string) attribute.KeyValue { + return GraphqlDocumentKey.String(val) +} + +// GraphqlOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphqlOperationName(val string) attribute.KeyValue { + return GraphqlOperationNameKey.String(val) +} + +// Attributes for the Android platform on which the Android application is +// running. +const ( + // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" + // semantic conventions. 
It represents the unique identifier for the + // application + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' + HerokuAppIDKey = attribute.Key("heroku.app.id") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit + // hash for the current release + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. It represents + // the time and date the release was created + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2022-10-23T18:00:42Z' + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") +) + +// HerokuAppID returns an attribute KeyValue conforming to the +// "heroku.app.id" semantic conventions. It represents the unique identifier +// for the application +func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. It represents the commit hash +// for the current release +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming +// to the "heroku.release.creation_timestamp" semantic conventions. It +// represents the time and date the release was created +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// A host is defined as a computing instance. For example, physical servers, +// virtual machines, switches or disk array. +const ( + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is + // running on. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + HostArchKey = attribute.Key("host.arch") + + // HostCPUCacheL2SizeKey is the attribute Key conforming to the + // "host.cpu.cache.l2.size" semantic conventions. It represents the amount + // of level 2 memory cache available to the processor (in Bytes). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 12288000 + HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") + + // HostCPUFamilyKey is the attribute Key conforming to the + // "host.cpu.family" semantic conventions. It represents the family or + // generation of the CPU. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '6', 'PA-RISC 1.1e' + HostCPUFamilyKey = attribute.Key("host.cpu.family") + + // HostCPUModelIDKey is the attribute Key conforming to the + // "host.cpu.model.id" semantic conventions. It represents the model + // identifier. It provides more granular information about the CPU, + // distinguishing it from other CPUs within the same family. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '6', '9000/778/B180L' + HostCPUModelIDKey = attribute.Key("host.cpu.model.id") + + // HostCPUModelNameKey is the attribute Key conforming to the + // "host.cpu.model.name" semantic conventions. It represents the model + // designation of the processor. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz' + HostCPUModelNameKey = attribute.Key("host.cpu.model.name") + + // HostCPUSteppingKey is the attribute Key conforming to the + // "host.cpu.stepping" semantic conventions. It represents the stepping or + // core revisions. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1', 'r1p1' + HostCPUSteppingKey = attribute.Key("host.cpu.stepping") + + // HostCPUVendorIDKey is the attribute Key conforming to the + // "host.cpu.vendor.id" semantic conventions. It represents the processor + // manufacturer identifier. A maximum 12-character string. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'GenuineIntel' + // Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor + // ID string in EBX, EDX and ECX registers. Writing these to memory in this + // order results in a 12-character string. + HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id") + + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be + // the instance_id assigned by the cloud provider. For non-containerized + // systems, this should be the `machine-id`. See the table below for the + // sources to use to determine the `machine-id` based on operating system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + HostIDKey = attribute.Key("host.id") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the vM image ID or host OS image ID. + // For Cloud, this value is from the provider. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ami-07b06b442921831e5' + HostImageIDKey = attribute.Key("host.image.id") + + // HostImageNameKey is the attribute Key conforming to the + // "host.image.name" semantic conventions. It represents the name of the VM + // image or OS install the host was instantiated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + HostImageNameKey = attribute.Key("host.image.name") + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version + // string of the VM image or host OS as defined in [Version + // Attributes](/docs/resource/README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0.1' + HostImageVersionKey = attribute.Key("host.image.version") + + // HostIPKey is the attribute Key conforming to the "host.ip" semantic + // conventions. It represents the available IP addresses of the host, + // excluding loopback interfaces. 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 + // addresses MUST be specified in the [RFC + // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format. + HostIPKey = attribute.Key("host.ip") + + // HostMacKey is the attribute Key conforming to the "host.mac" semantic + // conventions. It represents the available MAC addresses of the host, + // excluding loopback interfaces. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' + // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal + // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf): + // as hyphen-separated octets in uppercase hexadecimal form from most to + // least significant. + HostMacKey = attribute.Key("host.mac") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// HostCPUCacheL2Size returns an attribute KeyValue conforming to the +// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of +// level 2 memory cache available to the processor (in Bytes). +func HostCPUCacheL2Size(val int) attribute.KeyValue { + return HostCPUCacheL2SizeKey.Int(val) +} + +// HostCPUFamily returns an attribute KeyValue conforming to the +// "host.cpu.family" semantic conventions. It represents the family or +// generation of the CPU. +func HostCPUFamily(val string) attribute.KeyValue { + return HostCPUFamilyKey.String(val) +} + +// HostCPUModelID returns an attribute KeyValue conforming to the +// "host.cpu.model.id" semantic conventions. It represents the model +// identifier. It provides more granular information about the CPU, +// distinguishing it from other CPUs within the same family. +func HostCPUModelID(val string) attribute.KeyValue { + return HostCPUModelIDKey.String(val) +} + +// HostCPUModelName returns an attribute KeyValue conforming to the +// "host.cpu.model.name" semantic conventions. It represents the model +// designation of the processor. 
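// ---------------------------------------------------------------------------
// Editor's aside (illustrative only, not part of the vendored diff): a minimal
// sketch of bundling the host.* helpers above into an SDK resource. It assumes
// the semconv v1.26.0 import path and uses resource.NewSchemaless so no schema
// URL constant is required; values echo the examples in the comments above.
// ---------------------------------------------------------------------------
package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version path
)

// hostResource describes the machine the process runs on.
func hostResource(name, id, machineType string) *resource.Resource {
	return resource.NewSchemaless(
		semconv.HostName(name),        // e.g. "opentelemetry-test"
		semconv.HostID(id),            // e.g. "fdbf79e8af94cb7f9e8df36789187052"
		semconv.HostType(machineType), // e.g. "n1-standard-1"
		semconv.HostArchAMD64,         // enum member defined above
	)
}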
+func HostCPUModelName(val string) attribute.KeyValue { + return HostCPUModelNameKey.String(val) +} + +// HostCPUStepping returns an attribute KeyValue conforming to the +// "host.cpu.stepping" semantic conventions. It represents the stepping or core +// revisions. +func HostCPUStepping(val string) attribute.KeyValue { + return HostCPUSteppingKey.String(val) +} + +// HostCPUVendorID returns an attribute KeyValue conforming to the +// "host.cpu.vendor.id" semantic conventions. It represents the processor +// manufacturer identifier. A maximum 12-character string. +func HostCPUVendorID(val string) attribute.KeyValue { + return HostCPUVendorIDKey.String(val) +} + +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized systems, +// this should be the `machine-id`. See the table below for the sources to use +// to determine the `machine-id` based on operating system. +func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the +// "host.image.id" semantic conventions. It represents the vM image ID or host +// OS image ID. For Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM +// image or OS install the host was instantiated from. +func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +} + +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string +// of the VM image or host OS as defined in [Version +// Attributes](/docs/resource/README.md#version-attributes). +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + +// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic +// conventions. It represents the available IP addresses of the host, excluding +// loopback interfaces. +func HostIP(val ...string) attribute.KeyValue { + return HostIPKey.StringSlice(val) +} + +// HostMac returns an attribute KeyValue conforming to the "host.mac" +// semantic conventions. It represents the available MAC addresses of the host, +// excluding loopback interfaces. +func HostMac(val ...string) attribute.KeyValue { + return HostMacKey.StringSlice(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" +// semantic conventions. It represents the name of the host. On Unix systems, +// it may contain what the hostname command returns, or the fully qualified +// hostname, or another name specified by the user. +func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" +// semantic conventions. It represents the type of host. For Cloud, this must +// be the machine type. +func HostType(val string) attribute.KeyValue { + return HostTypeKey.String(val) +} + +// Semantic convention attributes in the HTTP namespace. +const ( + // HTTPConnectionStateKey is the attribute Key conforming to the + // "http.connection.state" semantic conventions. It represents the state of + // the HTTP connection in the HTTP connection pool. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'active', 'idle' + HTTPConnectionStateKey = attribute.Key("http.connection.state") + + // HTTPRequestBodySizeKey is the attribute Key conforming to the + // "http.request.body.size" semantic conventions. It represents the size of + // the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3495 + HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") + + // HTTPRequestMethodKey is the attribute Key conforming to the + // "http.request.method" semantic conventions. It represents the hTTP + // request method. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + // Note: HTTP request method value SHOULD be "known" to the + // instrumentation. + // By default, this convention defines "known" methods as the ones listed + // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods) + // and the PATCH method defined in + // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html). + // + // If the HTTP request method is not known to instrumentation, it MUST set + // the `http.request.method` attribute to `_OTHER`. + // + // If the HTTP instrumentation could end up converting valid HTTP request + // methods to `_OTHER`, then it MUST provide a way to override + // the list of known HTTP methods. If this override is done via environment + // variable, then the environment variable MUST be named + // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated + // list of case-sensitive known HTTP methods + // (this list MUST be a full override of the default known method, it is + // not a list of known methods in addition to the defaults). + // + // HTTP method names are case-sensitive and `http.request.method` attribute + // value MUST match a known HTTP method name exactly. + // Instrumentations for specific web frameworks that consider HTTP methods + // to be case insensitive, SHOULD populate a canonical equivalent. + // Tracing instrumentations that do so, MUST also set + // `http.request.method_original` to the original value. + HTTPRequestMethodKey = attribute.Key("http.request.method") + + // HTTPRequestMethodOriginalKey is the attribute Key conforming to the + // "http.request.method_original" semantic conventions. It represents the + // original HTTP method sent by the client in the request line. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'GeT', 'ACL', 'foo' + HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") + + // HTTPRequestResendCountKey is the attribute Key conforming to the + // "http.request.resend_count" semantic conventions. It represents the + // ordinal number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending + // (e.g. 
redirection, authorization failure, 503 Server Unavailable, + // network issues, or any other). + HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") + + // HTTPRequestSizeKey is the attribute Key conforming to the + // "http.request.size" semantic conventions. It represents the total size + // of the request in bytes. This should be the total number of bytes sent + // over the wire, including the request line (HTTP/1.1), framing (HTTP/2 + // and HTTP/3), headers, and request body if any. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1437 + HTTPRequestSizeKey = attribute.Key("http.request.size") + + // HTTPResponseBodySizeKey is the attribute Key conforming to the + // "http.response.body.size" semantic conventions. It represents the size + // of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3495 + HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") + + // HTTPResponseSizeKey is the attribute Key conforming to the + // "http.response.size" semantic conventions. It represents the total size + // of the response in bytes. This should be the total number of bytes sent + // over the wire, including the status line (HTTP/1.1), framing (HTTP/2 and + // HTTP/3), headers, and response body and trailers if any. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1437 + HTTPResponseSizeKey = attribute.Key("http.response.size") + + // HTTPResponseStatusCodeKey is the attribute Key conforming to the + // "http.response.status_code" semantic conventions. It represents the + // [HTTP response status + // code](https://tools.ietf.org/html/rfc7231#section-6). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 200 + HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") + + // HTTPRouteKey is the attribute Key conforming to the "http.route" + // semantic conventions. It represents the matched route, that is, the path + // template in the format used by the respective server framework. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/users/:userID?', '{controller}/{action}/{id?}' + // Note: MUST NOT be populated when this is not supported by the HTTP + // server framework as the route attribute should have low-cardinality and + // the URI path can NOT substitute it. + // SHOULD include the [application + // root](/docs/http/http-spans.md#http-server-definitions) if there is one. 
+ HTTPRouteKey = attribute.Key("http.route") +) + +var ( + // active state + HTTPConnectionStateActive = HTTPConnectionStateKey.String("active") + // idle state + HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle") +) + +var ( + // CONNECT method + HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") + // DELETE method + HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") + // GET method + HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") + // HEAD method + HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") + // OPTIONS method + HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") + // PATCH method + HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") + // POST method + HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") + // PUT method + HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") + // TRACE method + HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") + // Any HTTP method that the instrumentation has no prior knowledge of + HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") +) + +// HTTPRequestBodySize returns an attribute KeyValue conforming to the +// "http.request.body.size" semantic conventions. It represents the size of the +// request payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestBodySize(val int) attribute.KeyValue { + return HTTPRequestBodySizeKey.Int(val) +} + +// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the +// "http.request.method_original" semantic conventions. It represents the +// original HTTP method sent by the client in the request line. +func HTTPRequestMethodOriginal(val string) attribute.KeyValue { + return HTTPRequestMethodOriginalKey.String(val) +} + +// HTTPRequestResendCount returns an attribute KeyValue conforming to the +// "http.request.resend_count" semantic conventions. It represents the ordinal +// number of request resending attempt (for any reason, including redirects). +func HTTPRequestResendCount(val int) attribute.KeyValue { + return HTTPRequestResendCountKey.Int(val) +} + +// HTTPRequestSize returns an attribute KeyValue conforming to the +// "http.request.size" semantic conventions. It represents the total size of +// the request in bytes. This should be the total number of bytes sent over the +// wire, including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), +// headers, and request body if any. +func HTTPRequestSize(val int) attribute.KeyValue { + return HTTPRequestSizeKey.Int(val) +} + +// HTTPResponseBodySize returns an attribute KeyValue conforming to the +// "http.response.body.size" semantic conventions. It represents the size of +// the response payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseBodySize(val int) attribute.KeyValue { + return HTTPResponseBodySizeKey.Int(val) +} + +// HTTPResponseSize returns an attribute KeyValue conforming to the +// "http.response.size" semantic conventions. 
It represents the total size of +// the response in bytes. This should be the total number of bytes sent over +// the wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), +// headers, and response body and trailers if any. +func HTTPResponseSize(val int) attribute.KeyValue { + return HTTPResponseSizeKey.Int(val) +} + +// HTTPResponseStatusCode returns an attribute KeyValue conforming to the +// "http.response.status_code" semantic conventions. It represents the [HTTP +// response status code](https://tools.ietf.org/html/rfc7231#section-6). +func HTTPResponseStatusCode(val int) attribute.KeyValue { + return HTTPResponseStatusCodeKey.Int(val) +} + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route, that is, the path +// template in the format used by the respective server framework. +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// Java Virtual machine related attributes. +const ( + // JvmBufferPoolNameKey is the attribute Key conforming to the + // "jvm.buffer.pool.name" semantic conventions. It represents the name of + // the buffer pool. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mapped', 'direct' + // Note: Pool names are generally obtained via + // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()). + JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name") + + // JvmGcActionKey is the attribute Key conforming to the "jvm.gc.action" + // semantic conventions. It represents the name of the garbage collector + // action. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'end of minor GC', 'end of major GC' + // Note: Garbage collector action is generally obtained via + // [GarbageCollectionNotificationInfo#getGcAction()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcAction()). + JvmGcActionKey = attribute.Key("jvm.gc.action") + + // JvmGcNameKey is the attribute Key conforming to the "jvm.gc.name" + // semantic conventions. It represents the name of the garbage collector. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'G1 Young Generation', 'G1 Old Generation' + // Note: Garbage collector name is generally obtained via + // [GarbageCollectionNotificationInfo#getGcName()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcName()). + JvmGcNameKey = attribute.Key("jvm.gc.name") + + // JvmMemoryPoolNameKey is the attribute Key conforming to the + // "jvm.memory.pool.name" semantic conventions. It represents the name of + // the memory pool. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' + // Note: Pool names are generally obtained via + // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). + JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name") + + // JvmMemoryTypeKey is the attribute Key conforming to the + // "jvm.memory.type" semantic conventions. It represents the type of + // memory. 
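// ---------------------------------------------------------------------------
// Editor's aside (illustrative only, not part of the vendored diff): a minimal
// sketch of applying the http.* helpers above to a server-side span, assuming
// the semconv v1.26.0 import path. Route and status values are placeholders.
// ---------------------------------------------------------------------------
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version path
)

// annotateServerSpan records the conventional method, route, and status code.
func annotateServerSpan(ctx context.Context, route string, status int) {
	_, span := otel.Tracer("http-server").Start(ctx, "GET "+route)
	defer span.End()

	span.SetAttributes(
		semconv.HTTPRequestMethodGet,           // enum member defined above
		semconv.HTTPRoute(route),               // e.g. "/users/:userID?"
		semconv.HTTPResponseStatusCode(status), // e.g. 200
	)
}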
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'heap', 'non_heap' + JvmMemoryTypeKey = attribute.Key("jvm.memory.type") + + // JvmThreadDaemonKey is the attribute Key conforming to the + // "jvm.thread.daemon" semantic conventions. It represents the whether the + // thread is daemon or not. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + JvmThreadDaemonKey = attribute.Key("jvm.thread.daemon") + + // JvmThreadStateKey is the attribute Key conforming to the + // "jvm.thread.state" semantic conventions. It represents the state of the + // thread. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'runnable', 'blocked' + JvmThreadStateKey = attribute.Key("jvm.thread.state") +) + +var ( + // Heap memory + JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap") + // Non-heap memory + JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap") +) + +var ( + // A thread that has not yet started is in this state + JvmThreadStateNew = JvmThreadStateKey.String("new") + // A thread executing in the Java virtual machine is in this state + JvmThreadStateRunnable = JvmThreadStateKey.String("runnable") + // A thread that is blocked waiting for a monitor lock is in this state + JvmThreadStateBlocked = JvmThreadStateKey.String("blocked") + // A thread that is waiting indefinitely for another thread to perform a particular action is in this state + JvmThreadStateWaiting = JvmThreadStateKey.String("waiting") + // A thread that is waiting for another thread to perform an action for up to a specified waiting time is in this state + JvmThreadStateTimedWaiting = JvmThreadStateKey.String("timed_waiting") + // A thread that has exited is in this state + JvmThreadStateTerminated = JvmThreadStateKey.String("terminated") +) + +// JvmBufferPoolName returns an attribute KeyValue conforming to the +// "jvm.buffer.pool.name" semantic conventions. It represents the name of the +// buffer pool. +func JvmBufferPoolName(val string) attribute.KeyValue { + return JvmBufferPoolNameKey.String(val) +} + +// JvmGcAction returns an attribute KeyValue conforming to the +// "jvm.gc.action" semantic conventions. It represents the name of the garbage +// collector action. +func JvmGcAction(val string) attribute.KeyValue { + return JvmGcActionKey.String(val) +} + +// JvmGcName returns an attribute KeyValue conforming to the "jvm.gc.name" +// semantic conventions. It represents the name of the garbage collector. +func JvmGcName(val string) attribute.KeyValue { + return JvmGcNameKey.String(val) +} + +// JvmMemoryPoolName returns an attribute KeyValue conforming to the +// "jvm.memory.pool.name" semantic conventions. It represents the name of the +// memory pool. +func JvmMemoryPoolName(val string) attribute.KeyValue { + return JvmMemoryPoolNameKey.String(val) +} + +// JvmThreadDaemon returns an attribute KeyValue conforming to the +// "jvm.thread.daemon" semantic conventions. It represents the whether the +// thread is daemon or not. +func JvmThreadDaemon(val bool) attribute.KeyValue { + return JvmThreadDaemonKey.Bool(val) +} + +// Kubernetes resource attributes. +const ( + // K8SClusterNameKey is the attribute Key conforming to the + // "k8s.cluster.name" semantic conventions. It represents the name of the + // cluster. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-cluster' + K8SClusterNameKey = attribute.Key("k8s.cluster.name") + + // K8SClusterUIDKey is the attribute Key conforming to the + // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for + // the cluster, set to the UID of the `kube-system` namespace. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' + // Note: K8S doesn't have support for obtaining a cluster ID. If this is + // ever + // added, we will recommend collecting the `k8s.cluster.uid` through the + // official APIs. In the meantime, we are able to use the `uid` of the + // `kube-system` namespace as a proxy for cluster ID. Read on for the + // rationale. + // + // Every object created in a K8S cluster is assigned a distinct UID. The + // `kube-system` namespace is used by Kubernetes itself and will exist + // for the lifetime of the cluster. Using the `uid` of the `kube-system` + // namespace is a reasonable proxy for the K8S ClusterID as it will only + // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are + // UUIDs as standardized by + // [ISO/IEC 9834-8 and ITU-T + // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html). + // Which states: + // + // > If generated according to one of the mechanisms defined in Rec. + // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be + // different from all other UUIDs generated before 3603 A.D., or is + // extremely likely to be different (depending on the mechanism chosen). + // + // Therefore, UIDs between clusters should be extremely unlikely to + // conflict. + K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") + + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. Container + // runtime usually uses different globally unique name (`container.name`). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the + // number of times the container was restarted. This attribute can be used + // to identify a particular container (running or stopped) within a + // container spec. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") + + // K8SContainerStatusLastTerminatedReasonKey is the attribute Key + // conforming to the "k8s.container.status.last_terminated_reason" semantic + // conventions. It represents the last terminated reason of the Container. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Evicted', 'Error' + K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason") + + // K8SCronJobNameKey is the attribute Key conforming to the + // "k8s.cronjob.name" semantic conventions. It represents the name of the + // CronJob. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") + + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") + + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") + + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") + + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") + + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" + // semantic conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") + + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" + // semantic conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of + // the ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") + + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") + + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions. It represents the name of the +// cluster. +func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// K8SClusterUID returns an attribute KeyValue conforming to the +// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the +// cluster, set to the UID of the `kube-system` namespace. +func K8SClusterUID(val string) attribute.KeyValue { + return K8SClusterUIDKey.String(val) +} + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses different globally unique name (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify +// a particular container (running or stopped) within a container spec. 
+func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue +// conforming to the "k8s.container.status.last_terminated_reason" semantic +// conventions. It represents the last terminated reason of the Container. +func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue { + return K8SContainerStatusLastTerminatedReasonKey.String(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. 
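The k8s.* helpers above are plain constructors over attribute.KeyValue, so they can be attached to any OpenTelemetry resource or span. A minimal sketch of tagging an SDK resource with them follows; the semconv import path (v1.26.0) and all example values are assumptions for illustration, not taken from this patch.

    package main

    import (
    	"fmt"

    	"go.opentelemetry.io/otel/sdk/resource"
    	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version path
    )

    func main() {
    	// Build a resource that identifies the workload inside the cluster.
    	// Each helper simply wraps attribute.Key(...).String(...) or .Int(...).
    	res := resource.NewWithAttributes(
    		semconv.SchemaURL,
    		semconv.K8SClusterName("demo-cluster"),
    		semconv.K8SNamespaceName("default"),
    		semconv.K8SDeploymentName("keda-operator"),
    		semconv.K8SPodName("keda-operator-7c64-xyz"),
    		semconv.K8SContainerName("keda-operator"),
    	)
    	fmt.Println(res.Attributes())
    }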
+func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// Log attributes +const ( + // LogIostreamKey is the attribute Key conforming to the "log.iostream" + // semantic conventions. It represents the stream associated with the log. + // See below for a list of well-known values. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + LogIostreamKey = attribute.Key("log.iostream") +) + +var ( + // Logs from stdout stream + LogIostreamStdout = LogIostreamKey.String("stdout") + // Events from stderr stream + LogIostreamStderr = LogIostreamKey.String("stderr") +) + +// Attributes for a file to which log was emitted. +const ( + // LogFileNameKey is the attribute Key conforming to the "log.file.name" + // semantic conventions. It represents the basename of the file. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'audit.log' + LogFileNameKey = attribute.Key("log.file.name") + + // LogFileNameResolvedKey is the attribute Key conforming to the + // "log.file.name_resolved" semantic conventions. It represents the + // basename of the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'uuid.log' + LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") + + // LogFilePathKey is the attribute Key conforming to the "log.file.path" + // semantic conventions. It represents the full path to the file. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/log/mysql/audit.log' + LogFilePathKey = attribute.Key("log.file.path") + + // LogFilePathResolvedKey is the attribute Key conforming to the + // "log.file.path_resolved" semantic conventions. It represents the full + // path to the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/lib/docker/uuid.log' + LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") +) + +// LogFileName returns an attribute KeyValue conforming to the +// "log.file.name" semantic conventions. It represents the basename of the +// file. 
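The log.file.* keys can be recorded wherever attribute.KeyValue values are accepted; one possible use, sketched here purely as an assumption (tracer name and file paths are invented), is annotating a span event with the file a log line came from.

    package main

    import (
    	"context"

    	"go.opentelemetry.io/otel"
    	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version path
    	"go.opentelemetry.io/otel/trace"
    )

    func tailAuditLog(ctx context.Context) {
    	_, span := otel.Tracer("example/logtail").Start(ctx, "read-log-line")
    	defer span.End()

    	// Attach the source file of the log line to an event on the span.
    	span.AddEvent("log.line", trace.WithAttributes(
    		semconv.LogFileName("audit.log"),
    		semconv.LogFilePath("/var/log/mysql/audit.log"),
    	))
    }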
+func LogFileName(val string) attribute.KeyValue { + return LogFileNameKey.String(val) +} + +// LogFileNameResolved returns an attribute KeyValue conforming to the +// "log.file.name_resolved" semantic conventions. It represents the basename of +// the file, with symlinks resolved. +func LogFileNameResolved(val string) attribute.KeyValue { + return LogFileNameResolvedKey.String(val) +} + +// LogFilePath returns an attribute KeyValue conforming to the +// "log.file.path" semantic conventions. It represents the full path to the +// file. +func LogFilePath(val string) attribute.KeyValue { + return LogFilePathKey.String(val) +} + +// LogFilePathResolved returns an attribute KeyValue conforming to the +// "log.file.path_resolved" semantic conventions. It represents the full path +// to the file, with symlinks resolved. +func LogFilePathResolved(val string) attribute.KeyValue { + return LogFilePathResolvedKey.String(val) +} + +// The generic attributes that may be used in any Log Record. +const ( + // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" + // semantic conventions. It represents a unique identifier for the Log + // Record. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' + // Note: If an id is provided, other log records with the same id will be + // considered duplicates and can be removed safely. This means, that two + // distinguishable log records MUST have different values. + // The id MAY be an [Universally Unique Lexicographically Sortable + // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers + // (e.g. UUID) may be used as needed. + LogRecordUIDKey = attribute.Key("log.record.uid") +) + +// LogRecordUID returns an attribute KeyValue conforming to the +// "log.record.uid" semantic conventions. It represents a unique identifier for +// the Log Record. +func LogRecordUID(val string) attribute.KeyValue { + return LogRecordUIDKey.String(val) +} + +// Attributes describing telemetry around messaging systems and messaging +// activities. +const ( + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client + // library supports both batch and single-message API for the same + // operation, instrumentations SHOULD use `messaging.batch.message_count` + // for batching APIs and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") + + // MessagingClientIDKey is the attribute Key conforming to the + // "messaging.client.id" semantic conventions. It represents a unique + // identifier for the client that consumes or produces a message. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'client-5', 'myhost@8742@s8083jm' + MessagingClientIDKey = attribute.Key("messaging.client.id") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. 
It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") + + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker doesn't have such notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationPartitionIDKey is the attribute Key conforming to + // the "messaging.destination.partition.id" semantic conventions. It + // represents the identifier of the partition messages are sent to or + // received from, unique within the `messaging.destination.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1' + MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. + // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingDestinationPublishAnonymousKey is the attribute Key conforming + // to the "messaging.destination_publish.anonymous" semantic conventions. + // It represents a boolean that is true if the publish message destination + // is anonymous (could be unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous") + + // MessagingDestinationPublishNameKey is the attribute Key conforming to + // the "messaging.destination_publish.name" semantic conventions. 
It + // represents the name of the original destination the message was + // published to + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: The name SHOULD uniquely identify a specific queue, topic, or + // other entity within the broker. If + // the broker doesn't have such notion, the original destination name + // SHOULD uniquely identify the broker. + MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name") + + // MessagingMessageBodySizeKey is the attribute Key conforming to the + // "messaging.message.body.size" semantic conventions. It represents the + // size of the message body in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1439 + // Note: This can refer to both the compressed or uncompressed body size. + // If both sizes are known, the uncompressed + // body size should be used. + MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the conversation ID identifying the conversation to which the message + // belongs, represented as a string. Sometimes called "Correlation ID". + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the + // "messaging.message.envelope.size" semantic conventions. It represents + // the size of the message body and metadata in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 2738 + // Note: This can refer to both the compressed or uncompressed size. If + // both sizes are known, the uncompressed + // size should be used. + MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size") + + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingOperationNameKey is the attribute Key conforming to the + // "messaging.operation.name" semantic conventions. It represents the + // system-specific name of the messaging operation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ack', 'nack', 'send' + MessagingOperationNameKey = attribute.Key("messaging.operation.name") + + // MessagingOperationTypeKey is the attribute Key conforming to the + // "messaging.operation.type" semantic conventions. It represents a string + // identifying the type of the messaging operation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: If a custom value is used, it MUST be of low cardinality. + MessagingOperationTypeKey = attribute.Key("messaging.operation.type") + + // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents the messaging + // system as identified by the client instrumentation. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The actual messaging system may differ from the one known by the + // client. For example, when using Kafka client libraries to communicate + // with Azure Event Hubs, the `messaging.system` is set to `kafka` based on + // the instrumentation's best knowledge. + MessagingSystemKey = attribute.Key("messaging.system") +) + +var ( + // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created + MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish") + // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios + MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create") + // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages + MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive") + // One or more messages are delivered to or processed by a consumer + MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("process") + // One or more messages are settled + MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle") +) + +var ( + // Apache ActiveMQ + MessagingSystemActivemq = MessagingSystemKey.String("activemq") + // Amazon Simple Queue Service (SQS) + MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs") + // Azure Event Grid + MessagingSystemEventgrid = MessagingSystemKey.String("eventgrid") + // Azure Event Hubs + MessagingSystemEventhubs = MessagingSystemKey.String("eventhubs") + // Azure Service Bus + MessagingSystemServicebus = MessagingSystemKey.String("servicebus") + // Google Cloud Pub/Sub + MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub") + // Java Message Service + MessagingSystemJms = MessagingSystemKey.String("jms") + // Apache Kafka + MessagingSystemKafka = MessagingSystemKey.String("kafka") + // RabbitMQ + MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq") + // Apache RocketMQ + MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq") +) + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// MessagingClientID returns an attribute KeyValue conforming to the +// "messaging.client.id" semantic conventions. It represents a unique +// identifier for the client that consumes or produces a message. +func MessagingClientID(val string) attribute.KeyValue { + return MessagingClientIDKey.String(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have auto-generated name). 
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationPartitionID returns an attribute KeyValue conforming +// to the "messaging.destination.partition.id" semantic conventions. It +// represents the identifier of the partition messages are sent to or received +// from, unique within the `messaging.destination.name`. +func MessagingDestinationPartitionID(val string) attribute.KeyValue { + return MessagingDestinationPartitionIDKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationPublishAnonymous returns an attribute KeyValue +// conforming to the "messaging.destination_publish.anonymous" semantic +// conventions. It represents a boolean that is true if the publish message +// destination is anonymous (could be unnamed or have auto-generated name). +func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationPublishAnonymousKey.Bool(val) +} + +// MessagingDestinationPublishName returns an attribute KeyValue conforming +// to the "messaging.destination_publish.name" semantic conventions. It +// represents the name of the original destination the message was published to +func MessagingDestinationPublishName(val string) attribute.KeyValue { + return MessagingDestinationPublishNameKey.String(val) +} + +// MessagingMessageBodySize returns an attribute KeyValue conforming to the +// "messaging.message.body.size" semantic conventions. It represents the size +// of the message body in bytes. +func MessagingMessageBodySize(val int) attribute.KeyValue { + return MessagingMessageBodySizeKey.Int(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. It +// represents the conversation ID identifying the conversation to which the +// message belongs, represented as a string. Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to +// the "messaging.message.envelope.size" semantic conventions. It represents +// the size of the message body and metadata in bytes. 
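The generic messaging.* helpers combine with the messaging.system enum values declared above. A hedged sketch of a producer span follows; the queue name, tracer name, and choice of RabbitMQ are illustrative assumptions only.

    package main

    import (
    	"context"

    	"go.opentelemetry.io/otel"
    	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version path
    	"go.opentelemetry.io/otel/trace"
    )

    func publishOrder(ctx context.Context, body []byte) {
    	// Producer span carrying the generic messaging.* attributes.
    	_, span := otel.Tracer("example/publisher").Start(ctx, "publish MyQueue",
    		trace.WithSpanKind(trace.SpanKindProducer),
    		trace.WithAttributes(
    			semconv.MessagingSystemRabbitmq,       // enum value
    			semconv.MessagingOperationTypePublish, // enum value
    			semconv.MessagingOperationName("send"),
    			semconv.MessagingDestinationName("MyQueue"),
    			semconv.MessagingMessageBodySize(len(body)),
    		),
    	)
    	defer span.End()

    	// ... hand the message to the broker client here ...
    }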
+func MessagingMessageEnvelopeSize(val int) attribute.KeyValue { + return MessagingMessageEnvelopeSizeKey.Int(val) +} + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by +// the messaging system as an identifier for the message, represented as a +// string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingOperationName returns an attribute KeyValue conforming to the +// "messaging.operation.name" semantic conventions. It represents the +// system-specific name of the messaging operation. +func MessagingOperationName(val string) attribute.KeyValue { + return MessagingOperationNameKey.String(val) +} + +// This group describes attributes specific to Apache Kafka. +const ( + // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the + // "messaging.kafka.consumer.group" semantic conventions. It represents the + // name of the Kafka Consumer Group that is handling the message. Only + // applies to consumers, not producers. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-group' + MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") + + // MessagingKafkaMessageKeyKey is the attribute Key conforming to the + // "messaging.kafka.message.key" semantic conventions. It represents the + // message keys in Kafka are used for grouping alike messages to ensure + // they're processed on the same partition. They differ from + // `messaging.message.id` in that they're not unique. If the key is `null`, + // the attribute MUST NOT be set. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myKey' + // Note: If the key type is not string, it's string representation has to + // be supplied for the attribute. If the key has no unambiguous, canonical + // string form, don't include its value. + MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") + + // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the + // "messaging.kafka.message.offset" semantic conventions. It represents the + // offset of a record in the corresponding Kafka partition. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") + + // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the + // "messaging.kafka.message.tombstone" semantic conventions. It represents + // a boolean that is true if the message is a tombstone. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") +) + +// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to +// the "messaging.kafka.consumer.group" semantic conventions. It represents the +// name of the Kafka Consumer Group that is handling the message. Only applies +// to consumers, not producers. +func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { + return MessagingKafkaConsumerGroupKey.String(val) +} + +// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the +// "messaging.kafka.message.key" semantic conventions. It represents the +// message keys in Kafka are used for grouping alike messages to ensure they're +// processed on the same partition. 
They differ from `messaging.message.id` in +// that they're not unique. If the key is `null`, the attribute MUST NOT be +// set. +func MessagingKafkaMessageKey(val string) attribute.KeyValue { + return MessagingKafkaMessageKeyKey.String(val) +} + +// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to +// the "messaging.kafka.message.offset" semantic conventions. It represents the +// offset of a record in the corresponding Kafka partition. +func MessagingKafkaMessageOffset(val int) attribute.KeyValue { + return MessagingKafkaMessageOffsetKey.Int(val) +} + +// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming +// to the "messaging.kafka.message.tombstone" semantic conventions. It +// represents a boolean that is true if the message is a tombstone. +func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { + return MessagingKafkaMessageTombstoneKey.Bool(val) +} + +// This group describes attributes specific to RabbitMQ. +const ( + // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key + // conforming to the "messaging.rabbitmq.destination.routing_key" semantic + // conventions. It represents the rabbitMQ message routing key. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myKey' + MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") + + // MessagingRabbitmqMessageDeliveryTagKey is the attribute Key conforming + // to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. + // It represents the rabbitMQ message delivery tag + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 123 + MessagingRabbitmqMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag") +) + +// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.destination.routing_key" semantic +// conventions. It represents the rabbitMQ message routing key. +func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { + return MessagingRabbitmqDestinationRoutingKeyKey.String(val) +} + +// MessagingRabbitmqMessageDeliveryTag returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.message.delivery_tag" semantic +// conventions. It represents the rabbitMQ message delivery tag +func MessagingRabbitmqMessageDeliveryTag(val int) attribute.KeyValue { + return MessagingRabbitmqMessageDeliveryTagKey.Int(val) +} + +// This group describes attributes specific to RocketMQ. +const ( + // MessagingRocketmqClientGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.client_group" semantic conventions. It represents + // the name of the RocketMQ producer/consumer group that is handling the + // message. The client type is identified by the SpanKind. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myConsumerGroup' + MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") + + // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to + // the "messaging.rocketmq.consumption_model" semantic conventions. It + // represents the model of message consumption. This only applies to + // consumer spans. 
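For the Kafka-specific keys, a consumer-side sketch under the same assumptions (group, topic, key, and offset values are made up):

    package main

    import (
    	"context"

    	"go.opentelemetry.io/otel"
    	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version path
    	"go.opentelemetry.io/otel/trace"
    )

    func processKafkaRecord(ctx context.Context, key string, offset int) {
    	// Consumer span combining generic and Kafka-specific attributes.
    	_, span := otel.Tracer("example/consumer").Start(ctx, "process orders",
    		trace.WithSpanKind(trace.SpanKindConsumer),
    		trace.WithAttributes(
    			semconv.MessagingSystemKafka,
    			semconv.MessagingDestinationName("orders"),
    			semconv.MessagingKafkaConsumerGroup("my-group"),
    			semconv.MessagingKafkaMessageKey(key),
    			semconv.MessagingKafkaMessageOffset(offset),
    		),
    	)
    	defer span.End()
    }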
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") + + // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delay_time_level" semantic + // conventions. It represents the delay time level for delay message, which + // determines the message delay time. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3 + MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") + + // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delivery_timestamp" + // semantic conventions. It represents the timestamp in milliseconds that + // the delay message is expected to be delivered to consumer. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1665987217045 + MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") + + // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.message.group" semantic conventions. It represents + // the it is essential for FIFO message. Messages that belong to the same + // message group are always processed one by one within the same consumer + // group. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myMessageGroup' + MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") + + // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the + // "messaging.rocketmq.message.keys" semantic conventions. It represents + // the key(s) of message, another way to mark message besides message id. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'keyA', 'keyB' + MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") + + // MessagingRocketmqMessageTagKey is the attribute Key conforming to the + // "messaging.rocketmq.message.tag" semantic conventions. It represents the + // secondary classifier of message besides topic. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'tagA' + MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") + + // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the + // "messaging.rocketmq.message.type" semantic conventions. It represents + // the type of message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") + + // MessagingRocketmqNamespaceKey is the attribute Key conforming to the + // "messaging.rocketmq.namespace" semantic conventions. It represents the + // namespace of RocketMQ resources, resources in different namespaces are + // individual. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myNamespace' + MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") +) + +var ( + // Clustering consumption model + MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") + // Broadcasting consumption model + MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") +) + +var ( + // Normal message + MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") + // FIFO message + MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") + // Delay message + MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") + // Transaction message + MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") +) + +// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.client_group" semantic conventions. It represents +// the name of the RocketMQ producer/consumer group that is handling the +// message. The client type is identified by the SpanKind. +func MessagingRocketmqClientGroup(val string) attribute.KeyValue { + return MessagingRocketmqClientGroupKey.String(val) +} + +// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delay_time_level" semantic +// conventions. It represents the delay time level for delay message, which +// determines the message delay time. +func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { + return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) +} + +// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic +// conventions. It represents the timestamp in milliseconds that the delay +// message is expected to be delivered to consumer. +func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { + return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) +} + +// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.group" semantic conventions. It represents +// the it is essential for FIFO message. Messages that belong to the same +// message group are always processed one by one within the same consumer +// group. +func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { + return MessagingRocketmqMessageGroupKey.String(val) +} + +// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.keys" semantic conventions. It represents +// the key(s) of message, another way to mark message besides message id. +func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { + return MessagingRocketmqMessageKeysKey.StringSlice(val) +} + +// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.tag" semantic conventions. It represents the +// secondary classifier of message besides topic. +func MessagingRocketmqMessageTag(val string) attribute.KeyValue { + return MessagingRocketmqMessageTagKey.String(val) +} + +// MessagingRocketmqNamespace returns an attribute KeyValue conforming to +// the "messaging.rocketmq.namespace" semantic conventions. 
It represents the +// namespace of RocketMQ resources, resources in different namespaces are +// individual. +func MessagingRocketmqNamespace(val string) attribute.KeyValue { + return MessagingRocketmqNamespaceKey.String(val) +} + +// This group describes attributes specific to GCP Pub/Sub. +const ( + // MessagingGCPPubsubMessageAckDeadlineKey is the attribute Key conforming + // to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. + // It represents the ack deadline in seconds set for the modify ack + // deadline request. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + MessagingGCPPubsubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline") + + // MessagingGCPPubsubMessageAckIDKey is the attribute Key conforming to the + // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It + // represents the ack id for a given message. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ack_id' + MessagingGCPPubsubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id") + + // MessagingGCPPubsubMessageDeliveryAttemptKey is the attribute Key + // conforming to the "messaging.gcp_pubsub.message.delivery_attempt" + // semantic conventions. It represents the delivery attempt for a given + // message. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 2 + MessagingGCPPubsubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt") + + // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming + // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. + // It represents the ordering key for a given message. If the attribute is + // not present, the message does not have an ordering key. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ordering_key' + MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key") +) + +// MessagingGCPPubsubMessageAckDeadline returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.ack_deadline" semantic +// conventions. It represents the ack deadline in seconds set for the modify +// ack deadline request. +func MessagingGCPPubsubMessageAckDeadline(val int) attribute.KeyValue { + return MessagingGCPPubsubMessageAckDeadlineKey.Int(val) +} + +// MessagingGCPPubsubMessageAckID returns an attribute KeyValue conforming +// to the "messaging.gcp_pubsub.message.ack_id" semantic conventions. It +// represents the ack id for a given message. +func MessagingGCPPubsubMessageAckID(val string) attribute.KeyValue { + return MessagingGCPPubsubMessageAckIDKey.String(val) +} + +// MessagingGCPPubsubMessageDeliveryAttempt returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic +// conventions. It represents the delivery attempt for a given message. +func MessagingGCPPubsubMessageDeliveryAttempt(val int) attribute.KeyValue { + return MessagingGCPPubsubMessageDeliveryAttemptKey.Int(val) +} + +// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic +// conventions. It represents the ordering key for a given message. If the +// attribute is not present, the message does not have an ordering key. 
+func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue { + return MessagingGCPPubsubMessageOrderingKeyKey.String(val) +} + +// This group describes attributes specific to Azure Service Bus. +const ( + // MessagingServicebusDestinationSubscriptionNameKey is the attribute Key + // conforming to the "messaging.servicebus.destination.subscription_name" + // semantic conventions. It represents the name of the subscription in the + // topic messages are received from. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mySubscription' + MessagingServicebusDestinationSubscriptionNameKey = attribute.Key("messaging.servicebus.destination.subscription_name") + + // MessagingServicebusDispositionStatusKey is the attribute Key conforming + // to the "messaging.servicebus.disposition_status" semantic conventions. + // It represents the describes the [settlement + // type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + MessagingServicebusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status") + + // MessagingServicebusMessageDeliveryCountKey is the attribute Key + // conforming to the "messaging.servicebus.message.delivery_count" semantic + // conventions. It represents the number of deliveries that have been + // attempted for this message. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 2 + MessagingServicebusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count") + + // MessagingServicebusMessageEnqueuedTimeKey is the attribute Key + // conforming to the "messaging.servicebus.message.enqueued_time" semantic + // conventions. It represents the UTC epoch seconds at which the message + // has been accepted and stored in the entity. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1701393730 + MessagingServicebusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time") +) + +var ( + // Message is completed + MessagingServicebusDispositionStatusComplete = MessagingServicebusDispositionStatusKey.String("complete") + // Message is abandoned + MessagingServicebusDispositionStatusAbandon = MessagingServicebusDispositionStatusKey.String("abandon") + // Message is sent to dead letter queue + MessagingServicebusDispositionStatusDeadLetter = MessagingServicebusDispositionStatusKey.String("dead_letter") + // Message is deferred + MessagingServicebusDispositionStatusDefer = MessagingServicebusDispositionStatusKey.String("defer") +) + +// MessagingServicebusDestinationSubscriptionName returns an attribute +// KeyValue conforming to the +// "messaging.servicebus.destination.subscription_name" semantic conventions. +// It represents the name of the subscription in the topic messages are +// received from. +func MessagingServicebusDestinationSubscriptionName(val string) attribute.KeyValue { + return MessagingServicebusDestinationSubscriptionNameKey.String(val) +} + +// MessagingServicebusMessageDeliveryCount returns an attribute KeyValue +// conforming to the "messaging.servicebus.message.delivery_count" semantic +// conventions. It represents the number of deliveries that have been attempted +// for this message. 
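The broker-specific groups (GCP Pub/Sub, Service Bus, Event Hubs) follow the same pattern and also work as metric attributes. A sketch with a counter, where the meter name, instrument name, and subscription name are assumptions; a real program would create the counter once, not per call.

    package main

    import (
    	"context"

    	"go.opentelemetry.io/otel"
    	"go.opentelemetry.io/otel/metric"
    	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version path
    )

    func countReceived(ctx context.Context, deliveryCount int) error {
    	meter := otel.Meter("example/servicebus")
    	counter, err := meter.Int64Counter("example.messages.received")
    	if err != nil {
    		return err
    	}

    	// Attach Service Bus specific attributes to the measurement.
    	counter.Add(ctx, 1, metric.WithAttributes(
    		semconv.MessagingSystemServicebus,
    		semconv.MessagingServicebusDestinationSubscriptionName("mySubscription"),
    		semconv.MessagingServicebusMessageDeliveryCount(deliveryCount),
    	))
    	return nil
    }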
+func MessagingServicebusMessageDeliveryCount(val int) attribute.KeyValue { + return MessagingServicebusMessageDeliveryCountKey.Int(val) +} + +// MessagingServicebusMessageEnqueuedTime returns an attribute KeyValue +// conforming to the "messaging.servicebus.message.enqueued_time" semantic +// conventions. It represents the UTC epoch seconds at which the message has +// been accepted and stored in the entity. +func MessagingServicebusMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingServicebusMessageEnqueuedTimeKey.Int(val) +} + +// This group describes attributes specific to Azure Event Hubs. +const ( + // MessagingEventhubsConsumerGroupKey is the attribute Key conforming to + // the "messaging.eventhubs.consumer.group" semantic conventions. It + // represents the name of the consumer group the event consumer is + // associated with. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'indexer' + MessagingEventhubsConsumerGroupKey = attribute.Key("messaging.eventhubs.consumer.group") + + // MessagingEventhubsMessageEnqueuedTimeKey is the attribute Key conforming + // to the "messaging.eventhubs.message.enqueued_time" semantic conventions. + // It represents the UTC epoch seconds at which the message has been + // accepted and stored in the entity. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1701393730 + MessagingEventhubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time") +) + +// MessagingEventhubsConsumerGroup returns an attribute KeyValue conforming +// to the "messaging.eventhubs.consumer.group" semantic conventions. It +// represents the name of the consumer group the event consumer is associated +// with. +func MessagingEventhubsConsumerGroup(val string) attribute.KeyValue { + return MessagingEventhubsConsumerGroupKey.String(val) +} + +// MessagingEventhubsMessageEnqueuedTime returns an attribute KeyValue +// conforming to the "messaging.eventhubs.message.enqueued_time" semantic +// conventions. It represents the UTC epoch seconds at which the message has +// been accepted and stored in the entity. +func MessagingEventhubsMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingEventhubsMessageEnqueuedTimeKey.Int(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetworkCarrierIccKey is the attribute Key conforming to the + // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 + // alpha-2 2-character country code associated with the mobile carrier + // network. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'DE' + NetworkCarrierIccKey = attribute.Key("network.carrier.icc") + + // NetworkCarrierMccKey is the attribute Key conforming to the + // "network.carrier.mcc" semantic conventions. It represents the mobile + // carrier country code. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '310' + NetworkCarrierMccKey = attribute.Key("network.carrier.mcc") + + // NetworkCarrierMncKey is the attribute Key conforming to the + // "network.carrier.mnc" semantic conventions. It represents the mobile + // carrier network code. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '001' + NetworkCarrierMncKey = attribute.Key("network.carrier.mnc") + + // NetworkCarrierNameKey is the attribute Key conforming to the + // "network.carrier.name" semantic conventions. It represents the name of + // the mobile carrier. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'sprint' + NetworkCarrierNameKey = attribute.Key("network.carrier.name") + + // NetworkConnectionSubtypeKey is the attribute Key conforming to the + // "network.connection.subtype" semantic conventions. It represents the + // this describes more details regarding the connection.type. It may be the + // type of cell technology connection, but it could be used for describing + // details about a wifi connection. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'LTE' + NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") + + // NetworkConnectionTypeKey is the attribute Key conforming to the + // "network.connection.type" semantic conventions. It represents the + // internet connection type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'wifi' + NetworkConnectionTypeKey = attribute.Key("network.connection.type") + + // NetworkIoDirectionKey is the attribute Key conforming to the + // "network.io.direction" semantic conventions. It represents the network + // IO operation direction. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'transmit' + NetworkIoDirectionKey = attribute.Key("network.io.direction") + + // NetworkLocalAddressKey is the attribute Key conforming to the + // "network.local.address" semantic conventions. It represents the local + // address of the network connection - IP address or Unix domain socket + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10.1.2.80', '/tmp/my.sock' + NetworkLocalAddressKey = attribute.Key("network.local.address") + + // NetworkLocalPortKey is the attribute Key conforming to the + // "network.local.port" semantic conventions. It represents the local port + // number of the network connection. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + NetworkLocalPortKey = attribute.Key("network.local.port") + + // NetworkPeerAddressKey is the attribute Key conforming to the + // "network.peer.address" semantic conventions. It represents the peer + // address of the network connection - IP address or Unix domain socket + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10.1.2.80', '/tmp/my.sock' + NetworkPeerAddressKey = attribute.Key("network.peer.address") + + // NetworkPeerPortKey is the attribute Key conforming to the + // "network.peer.port" semantic conventions. It represents the peer port + // number of the network connection. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + NetworkPeerPortKey = attribute.Key("network.peer.port") + + // NetworkProtocolNameKey is the attribute Key conforming to the + // "network.protocol.name" semantic conventions. It represents the [OSI + // application layer](https://osi-model.com/application-layer/) or non-OSI + // equivalent. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + // Note: The value SHOULD be normalized to lowercase. + NetworkProtocolNameKey = attribute.Key("network.protocol.name") + + // NetworkProtocolVersionKey is the attribute Key conforming to the + // "network.protocol.version" semantic conventions. It represents the + // actual version of the protocol used for network communication. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.1', '2' + // Note: If protocol version is subject to negotiation (for example using + // [ALPN](https://www.rfc-editor.org/rfc/rfc7301.html)), this attribute + // SHOULD be set to the negotiated version. If the actual protocol version + // is not known, this attribute SHOULD NOT be set. + NetworkProtocolVersionKey = attribute.Key("network.protocol.version") + + // NetworkTransportKey is the attribute Key conforming to the + // "network.transport" semantic conventions. It represents the [OSI + // transport layer](https://osi-model.com/transport-layer/) or + // [inter-process communication + // method](https://wikipedia.org/wiki/Inter-process_communication). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'tcp', 'udp' + // Note: The value SHOULD be normalized to lowercase. + // + // Consider always setting the transport when setting a port number, since + // a port number is ambiguous without knowing the transport. For example + // different processes could be listening on TCP port 12345 and UDP port + // 12345. + NetworkTransportKey = attribute.Key("network.transport") + + // NetworkTypeKey is the attribute Key conforming to the "network.type" + // semantic conventions. It represents the [OSI network + // layer](https://osi-model.com/network-layer/) or non-OSI equivalent. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ipv4', 'ipv6' + // Note: The value SHOULD be normalized to lowercase. + NetworkTypeKey = attribute.Key("network.type") +) + +var ( + // GPRS + NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") + // EDGE + NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") + // UMTS + NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") + // CDMA + NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") + // HSPA + NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") + // IDEN + NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") + // EVDO Rev. 
B + NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") + // LTE + NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") + // EHRPD + NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") + // GSM + NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") +) + +var ( + // wifi + NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") + // wired + NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") + // cell + NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") + // unavailable + NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") + // unknown + NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") +) + +var ( + // transmit + NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit") + // receive + NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive") +) + +var ( + // TCP + NetworkTransportTCP = NetworkTransportKey.String("tcp") + // UDP + NetworkTransportUDP = NetworkTransportKey.String("udp") + // Named or anonymous pipe + NetworkTransportPipe = NetworkTransportKey.String("pipe") + // Unix domain socket + NetworkTransportUnix = NetworkTransportKey.String("unix") +) + +var ( + // IPv4 + NetworkTypeIpv4 = NetworkTypeKey.String("ipv4") + // IPv6 + NetworkTypeIpv6 = NetworkTypeKey.String("ipv6") +) + +// NetworkCarrierIcc returns an attribute KeyValue conforming to the +// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetworkCarrierIcc(val string) attribute.KeyValue { + return NetworkCarrierIccKey.String(val) +} + +// NetworkCarrierMcc returns an attribute KeyValue conforming to the +// "network.carrier.mcc" semantic conventions. It represents the mobile carrier +// country code. +func NetworkCarrierMcc(val string) attribute.KeyValue { + return NetworkCarrierMccKey.String(val) +} + +// NetworkCarrierMnc returns an attribute KeyValue conforming to the +// "network.carrier.mnc" semantic conventions. It represents the mobile carrier +// network code. +func NetworkCarrierMnc(val string) attribute.KeyValue { + return NetworkCarrierMncKey.String(val) +} + +// NetworkCarrierName returns an attribute KeyValue conforming to the +// "network.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetworkCarrierName(val string) attribute.KeyValue { + return NetworkCarrierNameKey.String(val) +} + +// NetworkLocalAddress returns an attribute KeyValue conforming to the +// "network.local.address" semantic conventions. It represents the local +// address of the network connection - IP address or Unix domain socket name. 
+func NetworkLocalAddress(val string) attribute.KeyValue { + return NetworkLocalAddressKey.String(val) +} + +// NetworkLocalPort returns an attribute KeyValue conforming to the +// "network.local.port" semantic conventions. It represents the local port +// number of the network connection. +func NetworkLocalPort(val int) attribute.KeyValue { + return NetworkLocalPortKey.Int(val) +} + +// NetworkPeerAddress returns an attribute KeyValue conforming to the +// "network.peer.address" semantic conventions. It represents the peer address +// of the network connection - IP address or Unix domain socket name. +func NetworkPeerAddress(val string) attribute.KeyValue { + return NetworkPeerAddressKey.String(val) +} + +// NetworkPeerPort returns an attribute KeyValue conforming to the +// "network.peer.port" semantic conventions. It represents the peer port number +// of the network connection. +func NetworkPeerPort(val int) attribute.KeyValue { + return NetworkPeerPortKey.Int(val) +} + +// NetworkProtocolName returns an attribute KeyValue conforming to the +// "network.protocol.name" semantic conventions. It represents the [OSI +// application layer](https://osi-model.com/application-layer/) or non-OSI +// equivalent. +func NetworkProtocolName(val string) attribute.KeyValue { + return NetworkProtocolNameKey.String(val) +} + +// NetworkProtocolVersion returns an attribute KeyValue conforming to the +// "network.protocol.version" semantic conventions. It represents the actual +// version of the protocol used for network communication. +func NetworkProtocolVersion(val string) attribute.KeyValue { + return NetworkProtocolVersionKey.String(val) +} + +// An OCI image manifest. +const ( + // OciManifestDigestKey is the attribute Key conforming to the + // "oci.manifest.digest" semantic conventions. It represents the digest of + // the OCI image manifest. For container images specifically is the digest + // by which the container image is known. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4' + // Note: Follows [OCI Image Manifest + // Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md), + // and specifically the [Digest + // property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests). + // An example can be found in [Example Image + // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest). + OciManifestDigestKey = attribute.Key("oci.manifest.digest") +) + +// OciManifestDigest returns an attribute KeyValue conforming to the +// "oci.manifest.digest" semantic conventions. It represents the digest of the +// OCI image manifest. For container images specifically is the digest by which +// the container image is known. +func OciManifestDigest(val string) attribute.KeyValue { + return OciManifestDigestKey.String(val) +} + +// Attributes used by the OpenTracing Shim layer. +const ( + // OpentracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the + // parent-child Reference type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The causal relationship between a child Span and a parent Span. 
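The `network.*` helpers above return `attribute.KeyValue` values that are normally attached to spans describing a connection. A minimal, illustrative sketch follows; the versioned semconv import path is not visible in this hunk, so `v1.26.0` is assumed below, and the span name and attribute values are made up:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version path
)

func main() {
	ctx := context.Background()
	// otel.Tracer returns a no-op tracer until a TracerProvider is set,
	// so this sketch runs as-is.
	tracer := otel.Tracer("example")

	_, span := tracer.Start(ctx, "publish")
	defer span.End()

	// Describe the underlying connection with the helpers defined in this file.
	span.SetAttributes(
		semconv.NetworkTransportTCP, // network.transport = "tcp"
		semconv.NetworkTypeIpv4,     // network.type = "ipv4"
		semconv.NetworkPeerAddress("10.1.2.80"),
		semconv.NetworkPeerPort(5672),
		semconv.NetworkProtocolName("amqp"),
		semconv.NetworkProtocolVersion("0.9.1"),
	)
}
```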
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +var ( + // The parent Span depends on the child Span in some capacity + OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") + // The parent Span doesn't depend in any way on the result of the child Span + OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") +) + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // OSBuildIDKey is the attribute Key conforming to the "os.build_id" + // semantic conventions. It represents the unique identifier for a + // particular build or compilation of the operating system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'TQ3C.230805.001.B2', '20E247', '22621' + OSBuildIDKey = attribute.Key("os.build_id") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to + // be parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 + // LTS' + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + OSTypeKey = attribute.Key("os.type") + + // OSVersionKey is the attribute Key conforming to the "os.version" + // semantic conventions. It represents the version string of the operating + // system as defined in [Version + // Attributes](/docs/resource/README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" +// semantic conventions. It represents the unique identifier for a particular +// build or compilation of the operating system. +func OSBuildID(val string) attribute.KeyValue { + return OSBuildIDKey.String(val) +} + +// OSDescription returns an attribute KeyValue conforming to the +// "os.description" semantic conventions. 
It represents the human readable (not +// intended to be parsed) OS version information, like e.g. reported by `ver` +// or `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating +// system as defined in [Version +// Attributes](/docs/resource/README.md#version-attributes). +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// Attributes reserved for OpenTelemetry +const ( + // OTelStatusCodeKey is the attribute Key conforming to the + // "otel.status_code" semantic conventions. It represents the name of the + // code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + OTelStatusCodeKey = attribute.Key("otel.status_code") + + // OTelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the + // description of the Status if it has a value, otherwise not set. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'resource not found' + OTelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +var ( + // The operation has been validated by an Application developer or Operator to have completed successfully + OTelStatusCodeOk = OTelStatusCodeKey.String("OK") + // The operation contains an error + OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") +) + +// OTelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the +// description of the Status if it has a value, otherwise not set. +func OTelStatusDescription(val string) attribute.KeyValue { + return OTelStatusDescriptionKey.String(val) +} + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // OTelScopeNameKey is the attribute Key conforming to the + // "otel.scope.name" semantic conventions. It represents the name of the + // instrumentation scope - (`InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'io.opentelemetry.contrib.mongodb' + OTelScopeNameKey = attribute.Key("otel.scope.name") + + // OTelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of + // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0.0' + OTelScopeVersionKey = attribute.Key("otel.scope.version") +) + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OTelScopeName(val string) attribute.KeyValue { + return OTelScopeNameKey.String(val) +} + +// OTelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. 
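The `os.*` helpers are resource-scoped: they are typically set once on the SDK resource rather than on individual spans. A small sketch under the same import-path assumption, with illustrative values:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version path
)

func main() {
	// Describe the host OS on the telemetry resource; values are illustrative.
	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.OSTypeLinux, // os.type = "linux"
		semconv.OSName("Ubuntu"),
		semconv.OSVersion("22.04.4"),
		semconv.OSDescription("Ubuntu 22.04.4 LTS"),
	)
	fmt.Println(res.Attributes())
}
```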
It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OTelScopeVersion(val string) attribute.KeyValue { + return OTelScopeVersionKey.String(val) +} + +// Operations that access some remote service. +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" + // semantic conventions. It represents the + // [`service.name`](/docs/resource/README.md#service) of the remote + // service. SHOULD be equal to the actual `service.name` resource attribute + // of the remote service if any. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the +// "peer.service" semantic conventions. It represents the +// [`service.name`](/docs/resource/README.md#service) of the remote service. +// SHOULD be equal to the actual `service.name` resource attribute of the +// remote service if any. +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// An operating system process. +const ( + // ProcessCommandKey is the attribute Key conforming to the + // "process.command" semantic conventions. It represents the command used + // to launch the process (i.e. the command name). On Linux based systems, + // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can + // be set to the first parameter extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'cmd/otelcol' + ProcessCommandKey = attribute.Key("process.command") + + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, + // this would be the full argv vector passed to `main`. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'cmd/otecol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") + + // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions. It represents the full + // command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. + // Do not set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + + // ProcessContextSwitchTypeKey is the attribute Key conforming to the + // "process.context_switch_type" semantic conventions. It represents the + // specifies whether the context switches for this data point were + // voluntary or involuntary. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type") + + // ProcessCreationTimeKey is the attribute Key conforming to the + // "process.creation.time" semantic conventions. It represents the date and + // time the process was created, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2023-11-21T09:25:34.853Z' + ProcessCreationTimeKey = attribute.Key("process.creation.time") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name + // of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name + // of `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full + // path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + + // ProcessExitCodeKey is the attribute Key conforming to the + // "process.exit.code" semantic conventions. It represents the exit code of + // the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 127 + ProcessExitCodeKey = attribute.Key("process.exit.code") + + // ProcessExitTimeKey is the attribute Key conforming to the + // "process.exit.time" semantic conventions. It represents the date and + // time the process exited, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2023-11-21T09:26:12.315Z' + ProcessExitTimeKey = attribute.Key("process.exit.time") + + // ProcessGroupLeaderPIDKey is the attribute Key conforming to the + // "process.group_leader.pid" semantic conventions. It represents the PID + // of the process's group leader. This is also the process group ID (PGID) + // of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 23 + ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid") + + // ProcessInteractiveKey is the attribute Key conforming to the + // "process.interactive" semantic conventions. It represents the whether + // the process is connected to an interactive shell. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + ProcessInteractiveKey = attribute.Key("process.interactive") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns + // the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") + + // ProcessPagingFaultTypeKey is the attribute Key conforming to the + // "process.paging.fault_type" semantic conventions. 
It represents the type + // of page fault for this data point. Type `major` is for major/hard page + // faults, and `minor` is for minor/soft page faults. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // Process identifier (PPID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessRealUserIDKey is the attribute Key conforming to the + // "process.real_user.id" semantic conventions. It represents the real user + // ID (RUID) of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1000 + ProcessRealUserIDKey = attribute.Key("process.real_user.id") + + // ProcessRealUserNameKey is the attribute Key conforming to the + // "process.real_user.name" semantic conventions. It represents the + // username of the real user of the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'operator' + ProcessRealUserNameKey = attribute.Key("process.real_user.name") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") + + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of + // the runtime of this process. For compiled native binaries, this SHOULD + // be the name of the compiler. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessSavedUserIDKey is the attribute Key conforming to the + // "process.saved_user.id" semantic conventions. It represents the saved + // user ID (SUID) of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1002 + ProcessSavedUserIDKey = attribute.Key("process.saved_user.id") + + // ProcessSavedUserNameKey is the attribute Key conforming to the + // "process.saved_user.name" semantic conventions. It represents the + // username of the saved user. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'operator' + ProcessSavedUserNameKey = attribute.Key("process.saved_user.name") + + // ProcessSessionLeaderPIDKey is the attribute Key conforming to the + // "process.session_leader.pid" semantic conventions. It represents the PID + // of the process's session leader. This is also the session ID (SID) of + // the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 14 + ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid") + + // ProcessUserIDKey is the attribute Key conforming to the + // "process.user.id" semantic conventions. It represents the effective user + // ID (EUID) of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1001 + ProcessUserIDKey = attribute.Key("process.user.id") + + // ProcessUserNameKey is the attribute Key conforming to the + // "process.user.name" semantic conventions. It represents the username of + // the effective user of the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'root' + ProcessUserNameKey = attribute.Key("process.user.name") + + // ProcessVpidKey is the attribute Key conforming to the "process.vpid" + // semantic conventions. It represents the virtual process identifier. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 12 + // Note: The process ID within a PID namespace. This is not necessarily + // unique across all processes on the host but it is unique within the + // process namespace that the process exists within. + ProcessVpidKey = attribute.Key("process.vpid") +) + +var ( + // voluntary + ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary") + // involuntary + ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary") +) + +var ( + // major + ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major") + // minor + ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor") +) + +// ProcessCommand returns an attribute KeyValue conforming to the +// "process.command" semantic conventions. It represents the command used to +// launch the process (i.e. the command name). On Linux based systems, can be +// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to +// the first parameter extracted from `GetCommandLineW`. +func ProcessCommand(val string) attribute.KeyValue { + return ProcessCommandKey.String(val) +} + +// ProcessCommandArgs returns an attribute KeyValue conforming to the +// "process.command_args" semantic conventions. It represents the all the +// command arguments (including the command/executable itself) as received by +// the process. On Linux-based systems (and some other Unixoid systems +// supporting procfs), can be set according to the list of null-delimited +// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, +// this would be the full argv vector passed to `main`. +func ProcessCommandArgs(val ...string) attribute.KeyValue { + return ProcessCommandArgsKey.StringSlice(val) +} + +// ProcessCommandLine returns an attribute KeyValue conforming to the +// "process.command_line" semantic conventions. It represents the full command +// used to launch the process as a single string representing the full command. +// On Windows, can be set to the result of `GetCommandLineW`. 
Do not set this +// if you have to assemble it just for monitoring; use `process.command_args` +// instead. +func ProcessCommandLine(val string) attribute.KeyValue { + return ProcessCommandLineKey.String(val) +} + +// ProcessCreationTime returns an attribute KeyValue conforming to the +// "process.creation.time" semantic conventions. It represents the date and +// time the process was created, in ISO 8601 format. +func ProcessCreationTime(val string) attribute.KeyValue { + return ProcessCreationTimeKey.String(val) +} + +// ProcessExecutableName returns an attribute KeyValue conforming to the +// "process.executable.name" semantic conventions. It represents the name of +// the process executable. On Linux based systems, can be set to the `Name` in +// `proc/[pid]/status`. On Windows, can be set to the base name of +// `GetProcessImageFileNameW`. +func ProcessExecutableName(val string) attribute.KeyValue { + return ProcessExecutableNameKey.String(val) +} + +// ProcessExecutablePath returns an attribute KeyValue conforming to the +// "process.executable.path" semantic conventions. It represents the full path +// to the process executable. On Linux based systems, can be set to the target +// of `proc/[pid]/exe`. On Windows, can be set to the result of +// `GetProcessImageFileNameW`. +func ProcessExecutablePath(val string) attribute.KeyValue { + return ProcessExecutablePathKey.String(val) +} + +// ProcessExitCode returns an attribute KeyValue conforming to the +// "process.exit.code" semantic conventions. It represents the exit code of the +// process. +func ProcessExitCode(val int) attribute.KeyValue { + return ProcessExitCodeKey.Int(val) +} + +// ProcessExitTime returns an attribute KeyValue conforming to the +// "process.exit.time" semantic conventions. It represents the date and time +// the process exited, in ISO 8601 format. +func ProcessExitTime(val string) attribute.KeyValue { + return ProcessExitTimeKey.String(val) +} + +// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the +// "process.group_leader.pid" semantic conventions. It represents the PID of +// the process's group leader. This is also the process group ID (PGID) of the +// process. +func ProcessGroupLeaderPID(val int) attribute.KeyValue { + return ProcessGroupLeaderPIDKey.Int(val) +} + +// ProcessInteractive returns an attribute KeyValue conforming to the +// "process.interactive" semantic conventions. It represents the whether the +// process is connected to an interactive shell. +func ProcessInteractive(val bool) attribute.KeyValue { + return ProcessInteractiveKey.Bool(val) +} + +// ProcessOwner returns an attribute KeyValue conforming to the +// "process.owner" semantic conventions. It represents the username of the user +// that owns the process. +func ProcessOwner(val string) attribute.KeyValue { + return ProcessOwnerKey.String(val) +} + +// ProcessParentPID returns an attribute KeyValue conforming to the +// "process.parent_pid" semantic conventions. It represents the parent Process +// identifier (PPID). +func ProcessParentPID(val int) attribute.KeyValue { + return ProcessParentPIDKey.Int(val) +} + +// ProcessPID returns an attribute KeyValue conforming to the "process.pid" +// semantic conventions. It represents the process identifier (PID). +func ProcessPID(val int) attribute.KeyValue { + return ProcessPIDKey.Int(val) +} + +// ProcessRealUserID returns an attribute KeyValue conforming to the +// "process.real_user.id" semantic conventions. It represents the real user ID +// (RUID) of the process. 
+func ProcessRealUserID(val int) attribute.KeyValue { + return ProcessRealUserIDKey.Int(val) +} + +// ProcessRealUserName returns an attribute KeyValue conforming to the +// "process.real_user.name" semantic conventions. It represents the username of +// the real user of the process. +func ProcessRealUserName(val string) attribute.KeyValue { + return ProcessRealUserNameKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessSavedUserID returns an attribute KeyValue conforming to the +// "process.saved_user.id" semantic conventions. It represents the saved user +// ID (SUID) of the process. +func ProcessSavedUserID(val int) attribute.KeyValue { + return ProcessSavedUserIDKey.Int(val) +} + +// ProcessSavedUserName returns an attribute KeyValue conforming to the +// "process.saved_user.name" semantic conventions. It represents the username +// of the saved user. +func ProcessSavedUserName(val string) attribute.KeyValue { + return ProcessSavedUserNameKey.String(val) +} + +// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the +// "process.session_leader.pid" semantic conventions. It represents the PID of +// the process's session leader. This is also the session ID (SID) of the +// process. +func ProcessSessionLeaderPID(val int) attribute.KeyValue { + return ProcessSessionLeaderPIDKey.Int(val) +} + +// ProcessUserID returns an attribute KeyValue conforming to the +// "process.user.id" semantic conventions. It represents the effective user ID +// (EUID) of the process. +func ProcessUserID(val int) attribute.KeyValue { + return ProcessUserIDKey.Int(val) +} + +// ProcessUserName returns an attribute KeyValue conforming to the +// "process.user.name" semantic conventions. It represents the username of the +// effective user of the process. +func ProcessUserName(val string) attribute.KeyValue { + return ProcessUserNameKey.String(val) +} + +// ProcessVpid returns an attribute KeyValue conforming to the +// "process.vpid" semantic conventions. It represents the virtual process +// identifier. +func ProcessVpid(val int) attribute.KeyValue { + return ProcessVpidKey.Int(val) +} + +// Attributes for process CPU +const ( + // ProcessCPUStateKey is the attribute Key conforming to the + // "process.cpu.state" semantic conventions. It represents the CPU state of + // the process. 
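The `process.*` group follows the same resource-scoped pattern, and most values can be taken from the standard library at startup. Another sketch under the same assumptions:

```go
package main

import (
	"fmt"
	"os"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version path
)

func main() {
	exe, _ := os.Executable() // best effort; error ignored for brevity

	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ProcessPID(os.Getpid()),
		semconv.ProcessParentPID(os.Getppid()),
		semconv.ProcessExecutablePath(exe),
		semconv.ProcessCommandArgs(os.Args...),
	)
	fmt.Println(res.Attributes())
}
```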
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + ProcessCPUStateKey = attribute.Key("process.cpu.state") +) + +var ( + // system + ProcessCPUStateSystem = ProcessCPUStateKey.String("system") + // user + ProcessCPUStateUser = ProcessCPUStateKey.String("user") + // wait + ProcessCPUStateWait = ProcessCPUStateKey.String("wait") +) + +// Attributes for remote procedure calls. +const ( + // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.connect_rpc.error_code" semantic conventions. It represents the + // [error codes](https://connect.build/docs/protocol/#error-codes) of the + // Connect request. Error codes are always string values. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") + + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the [numeric + // status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the + // `error.code` property of response if it is an error response. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be + // cast to string for simplicity. Use empty string in case of `null` value. + // Omit entirely if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJsonrpcVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // doesn't specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCMessageCompressedSizeKey is the attribute Key conforming to the + // "rpc.message.compressed_size" semantic conventions. It represents the + // compressed size of the message in bytes. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size") + + // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id" + // semantic conventions. It represents the mUST be calculated as two + // different counters starting from `1` one for sent messages and one for + // received message. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Note: This way we guarantee that the values will be consistent between + // different implementations. + RPCMessageIDKey = attribute.Key("rpc.message.id") + + // RPCMessageTypeKey is the attribute Key conforming to the + // "rpc.message.type" semantic conventions. It represents the whether this + // is a received or sent message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCMessageTypeKey = attribute.Key("rpc.message.type") + + // RPCMessageUncompressedSizeKey is the attribute Key conforming to the + // "rpc.message.uncompressed_size" semantic conventions. It represents the + // uncompressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size") + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" + // semantic conventions. It represents the name of the (logical) method + // being called, must be equal to the $method part in the span name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function` attribute may be used to store the + // latter (e.g., method actually executing the call on the server side, RPC + // client stub method on the client side). + RPCMethodKey = attribute.Key("rpc.method") + + // RPCServiceKey is the attribute Key conforming to the "rpc.service" + // semantic conventions. It represents the full (logical) name of the + // service being called, including its package name, if applicable. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing + // class. The `code.namespace` attribute may be used to store the latter + // (despite the attribute name, it may include a class name; e.g., class + // with method actually executing the call on the server side, RPC client + // stub class on the client side). + RPCServiceKey = attribute.Key("rpc.service") + + // RPCSystemKey is the attribute Key conforming to the "rpc.system" + // semantic conventions. It represents a string identifying the remoting + // system. See below for a list of well-known identifiers. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCSystemKey = attribute.Key("rpc.system") +) + +var ( + // cancelled + RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") + // unknown + RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") + // invalid_argument + RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") + // deadline_exceeded + RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") + // not_found + RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") + // already_exists + RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") + // permission_denied + RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") + // resource_exhausted + RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") + // failed_precondition + RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") + // aborted + RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") + // out_of_range + RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") + // unimplemented + RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") + // internal + RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") + // unavailable + RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") + // data_loss + RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") + // unauthenticated + RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +var ( + // sent + RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") + // received + RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = 
RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") + // Connect RPC + RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") +) + +// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the +// `error.code` property of response if it is an error response. +func RPCJsonrpcErrorCode(val int) attribute.KeyValue { + return RPCJsonrpcErrorCodeKey.Int(val) +} + +// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { + return RPCJsonrpcErrorMessageKey.String(val) +} + +// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` +// property of request or response. Since protocol allows id to be int, string, +// `null` or missing (for notifications), value is expected to be cast to +// string for simplicity. Use empty string in case of `null` value. Omit +// entirely if this is a notification. +func RPCJsonrpcRequestID(val string) attribute.KeyValue { + return RPCJsonrpcRequestIDKey.String(val) +} + +// RPCJsonrpcVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol +// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 +// doesn't specify this, the value can be omitted. +func RPCJsonrpcVersion(val string) attribute.KeyValue { + return RPCJsonrpcVersionKey.String(val) +} + +// RPCMessageCompressedSize returns an attribute KeyValue conforming to the +// "rpc.message.compressed_size" semantic conventions. It represents the +// compressed size of the message in bytes. +func RPCMessageCompressedSize(val int) attribute.KeyValue { + return RPCMessageCompressedSizeKey.Int(val) +} + +// RPCMessageID returns an attribute KeyValue conforming to the +// "rpc.message.id" semantic conventions. It represents the mUST be calculated +// as two different counters starting from `1` one for sent messages and one +// for received message. +func RPCMessageID(val int) attribute.KeyValue { + return RPCMessageIDKey.Int(val) +} + +// RPCMessageUncompressedSize returns an attribute KeyValue conforming to +// the "rpc.message.uncompressed_size" semantic conventions. It represents the +// uncompressed size of the message in bytes. +func RPCMessageUncompressedSize(val int) attribute.KeyValue { + return RPCMessageUncompressedSizeKey.Int(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// These attributes may be used to describe the server in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). 
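The `rpc.*` attributes are usually recorded on client and server spans around remote calls. A client-side sketch, again with an assumed semconv import path and a hypothetical gRPC service:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version path
)

func main() {
	ctx := context.Background()
	tracer := otel.Tracer("example")

	// Span name per RPC conventions: "$package.$service/$method" (illustrative).
	_, span := tracer.Start(ctx, "myservice.EchoService/Echo")
	defer span.End()

	span.SetAttributes(
		semconv.RPCSystemGRPC,                       // rpc.system = "grpc"
		semconv.RPCService("myservice.EchoService"), // rpc.service
		semconv.RPCMethod("Echo"),                   // rpc.method
		semconv.RPCGRPCStatusCodeOk,                 // rpc.grpc.status_code = 0
	)
}
```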
This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // ServerAddressKey is the attribute Key conforming to the "server.address" + // semantic conventions. It represents the server domain name if available + // without reverse DNS lookup; otherwise, IP address or Unix domain socket + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the client side, and when communicating through + // an intermediary, `server.address` SHOULD represent the server address + // behind any intermediaries, for example proxies, if it's available. + ServerAddressKey = attribute.Key("server.address") + + // ServerPortKey is the attribute Key conforming to the "server.port" + // semantic conventions. It represents the server port number. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + // Note: When observed from the client side, and when communicating through + // an intermediary, `server.port` SHOULD represent the server port behind + // any intermediaries, for example proxies, if it's available. + ServerPortKey = attribute.Key("server.port") +) + +// ServerAddress returns an attribute KeyValue conforming to the +// "server.address" semantic conventions. It represents the server domain name +// if available without reverse DNS lookup; otherwise, IP address or Unix +// domain socket name. +func ServerAddress(val string) attribute.KeyValue { + return ServerAddressKey.String(val) +} + +// ServerPort returns an attribute KeyValue conforming to the "server.port" +// semantic conventions. It represents the server port number. +func ServerPort(val int) attribute.KeyValue { + return ServerPortKey.Int(val) +} + +// A service instance. +const ( + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to + // distinguish instances of the same service that exist at the same time + // (e.g. instances of a horizontally scaled + // service). + // + // Implementations, such as SDKs, are recommended to generate a random + // Version 1 or Version 4 [RFC + // 4122](https://www.ietf.org/rfc/rfc4122.txt) UUID, but are free to use an + // inherent unique ID as the source of + // this value if stability is desirable. In that case, the ID SHOULD be + // used as source of a UUID Version 5 and + // SHOULD use the following UUID as the namespace: + // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`. + // + // UUIDs are typically recommended, as only an opaque value for the + // purposes of identifying a service instance is + // needed. 
Similar to what can be seen in the man page for the + // [`/etc/machine-id`](https://www.freedesktop.org/software/systemd/man/machine-id.html) + // file, the underlying + // data, such as pod name and namespace should be treated as confidential, + // being the user's choice to expose it + // or not via another resource attribute. + // + // For applications running behind an application server (like unicorn), we + // do not recommend using one identifier + // for all processes participating in the application. Instead, it's + // recommended each division (e.g. a worker + // thread in unicorn) to have its own instance.id. + // + // It's not recommended for a Collector to set `service.instance.id` if it + // can't unambiguously determine the + // service instance that is generating that telemetry. For instance, + // creating an UUID based on `pod.name` will + // likely be wrong, as the Collector might not know from which container + // within that pod the telemetry originated. + // However, Collectors can set the `service.instance.id` if they can + // unambiguously determine the service instance + // for that telemetry. This is typically the case for scraping receivers, + // as they know the target address and + // port. + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceNameKey is the attribute Key conforming to the "service.name" + // semantic conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled + // services. If the value was not specified, SDKs MUST fallback to + // `unknown_service:` concatenated with + // [`process.executable.name`](process.md), e.g. `unknown_service:bash`. If + // `process.executable.name` is not available, the value MUST be set to + // `unknown_service`. + ServiceNameKey = attribute.Key("service.name") + + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). Zero-length namespace string is assumed equal to + // unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. The format is not defined by these + // conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2.0.0', 'a01dbef8a' + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance. 
+func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. The format is not defined by these +// conventions. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// Session is defined as the period of time encompassing all activities +// performed by the application and the actions executed by the end user. +// Consequently, a Session is represented as a collection of Logs, Events, and +// Spans emitted by the Client Application throughout the Session's duration. +// Each Session is assigned a unique identifier, which is included as an +// attribute in the Logs, Events, and Spans generated during the Session's +// lifecycle. +// When a session reaches end of life, typically due to user inactivity or +// session timeout, a new session identifier will be assigned. The previous +// session identifier may be provided by the instrumentation so that telemetry +// backends can link the two sessions. +const ( + // SessionIDKey is the attribute Key conforming to the "session.id" + // semantic conventions. It represents a unique id to identify a session. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '00112233-4455-6677-8899-aabbccddeeff' + SessionIDKey = attribute.Key("session.id") + + // SessionPreviousIDKey is the attribute Key conforming to the + // "session.previous_id" semantic conventions. It represents the previous + // `session.id` for this user, when known. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '00112233-4455-6677-8899-aabbccddeeff' + SessionPreviousIDKey = attribute.Key("session.previous_id") +) + +// SessionID returns an attribute KeyValue conforming to the "session.id" +// semantic conventions. It represents a unique id to identify a session. +func SessionID(val string) attribute.KeyValue { + return SessionIDKey.String(val) +} + +// SessionPreviousID returns an attribute KeyValue conforming to the +// "session.previous_id" semantic conventions. It represents the previous +// `session.id` for this user, when known. +func SessionPreviousID(val string) attribute.KeyValue { + return SessionPreviousIDKey.String(val) +} + +// SignalR attributes +const ( + // SignalrConnectionStatusKey is the attribute Key conforming to the + // "signalr.connection.status" semantic conventions. It represents the + // signalR HTTP connection closure status. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'app_shutdown', 'timeout' + SignalrConnectionStatusKey = attribute.Key("signalr.connection.status") + + // SignalrTransportKey is the attribute Key conforming to the + // "signalr.transport" semantic conventions. 
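The `service.*` helpers identify the emitting service and are among the most commonly used resource attributes in practice. A sketch with hypothetical identifiers, under the same import-path assumption:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version path
)

func main() {
	// Hypothetical service identity; every value below is illustrative.
	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceName("keda-operator"),
		semconv.ServiceNamespace("keda"),
		semconv.ServiceVersion("2.15.1"),
		semconv.ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
	)
	fmt.Println(res.Attributes())
}
```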
It represents the [SignalR + // transport + // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'web_sockets', 'long_polling' + SignalrTransportKey = attribute.Key("signalr.transport") +) + +var ( + // The connection was closed normally + SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure") + // The connection was closed due to a timeout + SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout") + // The connection was closed because the app is shutting down + SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown") +) + +var ( + // ServerSentEvents protocol + SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events") + // LongPolling protocol + SignalrTransportLongPolling = SignalrTransportKey.String("long_polling") + // WebSockets protocol + SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets") +) + +// These attributes may be used to describe the sender of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // SourceAddressKey is the attribute Key conforming to the "source.address" + // semantic conventions. It represents the source address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix + // domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the destination side, and when communicating + // through an intermediary, `source.address` SHOULD represent the source + // address behind any intermediaries, for example proxies, if it's + // available. + SourceAddressKey = attribute.Key("source.address") + + // SourcePortKey is the attribute Key conforming to the "source.port" + // semantic conventions. It represents the source port number + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3389, 2888 + SourcePortKey = attribute.Key("source.port") +) + +// SourceAddress returns an attribute KeyValue conforming to the +// "source.address" semantic conventions. It represents the source address - +// domain name if available without reverse DNS lookup; otherwise, IP address +// or Unix domain socket name. +func SourceAddress(val string) attribute.KeyValue { + return SourceAddressKey.String(val) +} + +// SourcePort returns an attribute KeyValue conforming to the "source.port" +// semantic conventions. It represents the source port number +func SourcePort(val int) attribute.KeyValue { + return SourcePortKey.Int(val) +} + +// Describes System attributes +const ( + // SystemDeviceKey is the attribute Key conforming to the "system.device" + // semantic conventions. 
It represents the device identifier + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '(identifier)' + SystemDeviceKey = attribute.Key("system.device") +) + +// SystemDevice returns an attribute KeyValue conforming to the +// "system.device" semantic conventions. It represents the device identifier +func SystemDevice(val string) attribute.KeyValue { + return SystemDeviceKey.String(val) +} + +// Describes System CPU attributes +const ( + // SystemCPULogicalNumberKey is the attribute Key conforming to the + // "system.cpu.logical_number" semantic conventions. It represents the + // logical CPU number [0..n-1] + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1 + SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") + + // SystemCPUStateKey is the attribute Key conforming to the + // "system.cpu.state" semantic conventions. It represents the state of the + // CPU + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'idle', 'interrupt' + SystemCPUStateKey = attribute.Key("system.cpu.state") +) + +var ( + // user + SystemCPUStateUser = SystemCPUStateKey.String("user") + // system + SystemCPUStateSystem = SystemCPUStateKey.String("system") + // nice + SystemCPUStateNice = SystemCPUStateKey.String("nice") + // idle + SystemCPUStateIdle = SystemCPUStateKey.String("idle") + // iowait + SystemCPUStateIowait = SystemCPUStateKey.String("iowait") + // interrupt + SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt") + // steal + SystemCPUStateSteal = SystemCPUStateKey.String("steal") +) + +// SystemCPULogicalNumber returns an attribute KeyValue conforming to the +// "system.cpu.logical_number" semantic conventions. It represents the logical +// CPU number [0..n-1] +func SystemCPULogicalNumber(val int) attribute.KeyValue { + return SystemCPULogicalNumberKey.Int(val) +} + +// Describes System Memory attributes +const ( + // SystemMemoryStateKey is the attribute Key conforming to the + // "system.memory.state" semantic conventions. It represents the memory + // state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free', 'cached' + SystemMemoryStateKey = attribute.Key("system.memory.state") +) + +var ( + // used + SystemMemoryStateUsed = SystemMemoryStateKey.String("used") + // free + SystemMemoryStateFree = SystemMemoryStateKey.String("free") + // shared + SystemMemoryStateShared = SystemMemoryStateKey.String("shared") + // buffers + SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") + // cached + SystemMemoryStateCached = SystemMemoryStateKey.String("cached") +) + +// Describes System Memory Paging attributes +const ( + // SystemPagingDirectionKey is the attribute Key conforming to the + // "system.paging.direction" semantic conventions. It represents the paging + // access direction + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'in' + SystemPagingDirectionKey = attribute.Key("system.paging.direction") + + // SystemPagingStateKey is the attribute Key conforming to the + // "system.paging.state" semantic conventions. 
It represents the memory + // paging state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free' + SystemPagingStateKey = attribute.Key("system.paging.state") + + // SystemPagingTypeKey is the attribute Key conforming to the + // "system.paging.type" semantic conventions. It represents the memory + // paging type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'minor' + SystemPagingTypeKey = attribute.Key("system.paging.type") +) + +var ( + // in + SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") + // out + SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") +) + +var ( + // used + SystemPagingStateUsed = SystemPagingStateKey.String("used") + // free + SystemPagingStateFree = SystemPagingStateKey.String("free") +) + +var ( + // major + SystemPagingTypeMajor = SystemPagingTypeKey.String("major") + // minor + SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") +) + +// Describes Filesystem attributes +const ( + // SystemFilesystemModeKey is the attribute Key conforming to the + // "system.filesystem.mode" semantic conventions. It represents the + // filesystem mode + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'rw, ro' + SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") + + // SystemFilesystemMountpointKey is the attribute Key conforming to the + // "system.filesystem.mountpoint" semantic conventions. It represents the + // filesystem mount path + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/mnt/data' + SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") + + // SystemFilesystemStateKey is the attribute Key conforming to the + // "system.filesystem.state" semantic conventions. It represents the + // filesystem state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'used' + SystemFilesystemStateKey = attribute.Key("system.filesystem.state") + + // SystemFilesystemTypeKey is the attribute Key conforming to the + // "system.filesystem.type" semantic conventions. It represents the + // filesystem type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ext4' + SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") +) + +var ( + // used + SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") + // free + SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") + // reserved + SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") +) + +var ( + // fat32 + SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") + // exfat + SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") + // ntfs + SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") + // refs + SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") + // hfsplus + SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") + // ext4 + SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") +) + +// SystemFilesystemMode returns an attribute KeyValue conforming to the +// "system.filesystem.mode" semantic conventions. 
It represents the filesystem +// mode +func SystemFilesystemMode(val string) attribute.KeyValue { + return SystemFilesystemModeKey.String(val) +} + +// SystemFilesystemMountpoint returns an attribute KeyValue conforming to +// the "system.filesystem.mountpoint" semantic conventions. It represents the +// filesystem mount path +func SystemFilesystemMountpoint(val string) attribute.KeyValue { + return SystemFilesystemMountpointKey.String(val) +} + +// Describes Network attributes +const ( + // SystemNetworkStateKey is the attribute Key conforming to the + // "system.network.state" semantic conventions. It represents a stateless + // protocol MUST NOT set this attribute + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'close_wait' + SystemNetworkStateKey = attribute.Key("system.network.state") +) + +var ( + // close + SystemNetworkStateClose = SystemNetworkStateKey.String("close") + // close_wait + SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait") + // closing + SystemNetworkStateClosing = SystemNetworkStateKey.String("closing") + // delete + SystemNetworkStateDelete = SystemNetworkStateKey.String("delete") + // established + SystemNetworkStateEstablished = SystemNetworkStateKey.String("established") + // fin_wait_1 + SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1") + // fin_wait_2 + SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2") + // last_ack + SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack") + // listen + SystemNetworkStateListen = SystemNetworkStateKey.String("listen") + // syn_recv + SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv") + // syn_sent + SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent") + // time_wait + SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait") +) + +// Describes System Process attributes +const ( + // SystemProcessStatusKey is the attribute Key conforming to the + // "system.process.status" semantic conventions. It represents the process + // state, e.g., [Linux Process State + // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'running' + SystemProcessStatusKey = attribute.Key("system.process.status") +) + +var ( + // running + SystemProcessStatusRunning = SystemProcessStatusKey.String("running") + // sleeping + SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping") + // stopped + SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped") + // defunct + SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct") +) + +// Attributes for telemetry SDK. +const ( + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the + // language of the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'opentelemetry' + // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute + // to `opentelemetry`. 
+ // If another SDK, like a fork or a vendor-provided implementation, is + // used, this SDK MUST set the + // `telemetry.sdk.name` attribute to the fully-qualified class or module + // name of this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier `opentelemetry` is reserved and MUST NOT be used in this + // case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") + + // TelemetryDistroNameKey is the attribute Key conforming to the + // "telemetry.distro.name" semantic conventions. It represents the name of + // the auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'parts-unlimited-java' + // Note: Official auto instrumentation agents and distributions SHOULD set + // the `telemetry.distro.name` attribute to + // a string starting with `opentelemetry-`, e.g. + // `opentelemetry-java-instrumentation`. + TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") + + // TelemetryDistroVersionKey is the attribute Key conforming to the + // "telemetry.distro.version" semantic conventions. It represents the + // version string of the auto instrumentation agent or distribution, if + // used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.2.3' + TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // rust + TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. 
+func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// TelemetryDistroName returns an attribute KeyValue conforming to the +// "telemetry.distro.name" semantic conventions. It represents the name of the +// auto instrumentation agent or distribution, if used. +func TelemetryDistroName(val string) attribute.KeyValue { + return TelemetryDistroNameKey.String(val) +} + +// TelemetryDistroVersion returns an attribute KeyValue conforming to the +// "telemetry.distro.version" semantic conventions. It represents the version +// string of the auto instrumentation agent or distribution, if used. +func TelemetryDistroVersion(val string) attribute.KeyValue { + return TelemetryDistroVersionKey.String(val) +} + +// These attributes may be used for any operation to store information about a +// thread that started a span. +const ( + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed + // to OS thread ID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + ThreadIDKey = attribute.Key("thread.id") + + // ThreadNameKey is the attribute Key conforming to the "thread.name" + // semantic conventions. It represents the current thread name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'main' + ThreadNameKey = attribute.Key("thread.name") +) + +// ThreadID returns an attribute KeyValue conforming to the "thread.id" +// semantic conventions. It represents the current "managed" thread ID (as +// opposed to OS thread ID). +func ThreadID(val int) attribute.KeyValue { + return ThreadIDKey.Int(val) +} + +// ThreadName returns an attribute KeyValue conforming to the "thread.name" +// semantic conventions. It represents the current thread name. +func ThreadName(val string) attribute.KeyValue { + return ThreadNameKey.String(val) +} + +// Semantic convention attributes in the TLS namespace. +const ( + // TLSCipherKey is the attribute Key conforming to the "tls.cipher" + // semantic conventions. It represents the string indicating the + // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) + // used during the current connection. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA', + // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' + // Note: The values allowed for `tls.cipher` MUST be one of the + // `Descriptions` of the [registered TLS Cipher + // Suits](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4). + TLSCipherKey = attribute.Key("tls.cipher") + + // TLSClientCertificateKey is the attribute Key conforming to the + // "tls.client.certificate" semantic conventions. It represents the + // pEM-encoded stand-alone certificate offered by the client. This is + // usually mutually-exclusive of `client.certificate_chain` since this + // value also exists in that list. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...' + TLSClientCertificateKey = attribute.Key("tls.client.certificate") + + // TLSClientCertificateChainKey is the attribute Key conforming to the + // "tls.client.certificate_chain" semantic conventions. It represents the + // array of PEM-encoded certificates that make up the certificate chain + // offered by the client. 
This is usually mutually-exclusive of + // `client.certificate` since that value should be the first certificate in + // the chain. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain") + + // TLSClientHashMd5Key is the attribute Key conforming to the + // "tls.client.hash.md5" semantic conventions. It represents the + // certificate fingerprint using the MD5 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5") + + // TLSClientHashSha1Key is the attribute Key conforming to the + // "tls.client.hash.sha1" semantic conventions. It represents the + // certificate fingerprint using the SHA1 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") + + // TLSClientHashSha256Key is the attribute Key conforming to the + // "tls.client.hash.sha256" semantic conventions. It represents the + // certificate fingerprint using the SHA256 digest of DER-encoded version + // of certificate offered by the client. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' + TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") + + // TLSClientIssuerKey is the attribute Key conforming to the + // "tls.client.issuer" semantic conventions. It represents the + // distinguished name of + // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) + // of the issuer of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, + // DC=com' + TLSClientIssuerKey = attribute.Key("tls.client.issuer") + + // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" + // semantic conventions. It represents a hash that identifies clients based + // on how they perform an SSL/TLS handshake. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'd4e5b18d6b55c71272893221c96ba240' + TLSClientJa3Key = attribute.Key("tls.client.ja3") + + // TLSClientNotAfterKey is the attribute Key conforming to the + // "tls.client.not_after" semantic conventions. It represents the date/Time + // indicating when client certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2021-01-01T00:00:00.000Z' + TLSClientNotAfterKey = attribute.Key("tls.client.not_after") + + // TLSClientNotBeforeKey is the attribute Key conforming to the + // "tls.client.not_before" semantic conventions. 
It represents the + // date/Time indicating when client certificate is first considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1970-01-01T00:00:00.000Z' + TLSClientNotBeforeKey = attribute.Key("tls.client.not_before") + + // TLSClientServerNameKey is the attribute Key conforming to the + // "tls.client.server_name" semantic conventions. It represents the also + // called an SNI, this tells the server which hostname to which the client + // is attempting to connect to. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry.io' + TLSClientServerNameKey = attribute.Key("tls.client.server_name") + + // TLSClientSubjectKey is the attribute Key conforming to the + // "tls.client.subject" semantic conventions. It represents the + // distinguished name of subject of the x.509 certificate presented by the + // client. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com' + TLSClientSubjectKey = attribute.Key("tls.client.subject") + + // TLSClientSupportedCiphersKey is the attribute Key conforming to the + // "tls.client.supported_ciphers" semantic conventions. It represents the + // array of ciphers offered by the client during the client hello. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."' + TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers") + + // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic + // conventions. It represents the string indicating the curve used for the + // given cipher, when applicable + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'secp256r1' + TLSCurveKey = attribute.Key("tls.curve") + + // TLSEstablishedKey is the attribute Key conforming to the + // "tls.established" semantic conventions. It represents the boolean flag + // indicating if the TLS negotiation was successful and transitioned to an + // encrypted tunnel. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Examples: True + TLSEstablishedKey = attribute.Key("tls.established") + + // TLSNextProtocolKey is the attribute Key conforming to the + // "tls.next_protocol" semantic conventions. It represents the string + // indicating the protocol being tunneled. Per the values in the [IANA + // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), + // this string should be lower case. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'http/1.1' + TLSNextProtocolKey = attribute.Key("tls.next_protocol") + + // TLSProtocolNameKey is the attribute Key conforming to the + // "tls.protocol.name" semantic conventions. It represents the normalized + // lowercase protocol name parsed from original string of the negotiated + // [SSL/TLS protocol + // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + TLSProtocolNameKey = attribute.Key("tls.protocol.name") + + // TLSProtocolVersionKey is the attribute Key conforming to the + // "tls.protocol.version" semantic conventions. 
It represents the numeric + // part of the version parsed from the original string of the negotiated + // [SSL/TLS protocol + // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.2', '3' + TLSProtocolVersionKey = attribute.Key("tls.protocol.version") + + // TLSResumedKey is the attribute Key conforming to the "tls.resumed" + // semantic conventions. It represents the boolean flag indicating if this + // TLS connection was resumed from an existing TLS negotiation. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Examples: True + TLSResumedKey = attribute.Key("tls.resumed") + + // TLSServerCertificateKey is the attribute Key conforming to the + // "tls.server.certificate" semantic conventions. It represents the + // pEM-encoded stand-alone certificate offered by the server. This is + // usually mutually-exclusive of `server.certificate_chain` since this + // value also exists in that list. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...' + TLSServerCertificateKey = attribute.Key("tls.server.certificate") + + // TLSServerCertificateChainKey is the attribute Key conforming to the + // "tls.server.certificate_chain" semantic conventions. It represents the + // array of PEM-encoded certificates that make up the certificate chain + // offered by the server. This is usually mutually-exclusive of + // `server.certificate` since that value should be the first certificate in + // the chain. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain") + + // TLSServerHashMd5Key is the attribute Key conforming to the + // "tls.server.hash.md5" semantic conventions. It represents the + // certificate fingerprint using the MD5 digest of DER-encoded version of + // certificate offered by the server. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5") + + // TLSServerHashSha1Key is the attribute Key conforming to the + // "tls.server.hash.sha1" semantic conventions. It represents the + // certificate fingerprint using the SHA1 digest of DER-encoded version of + // certificate offered by the server. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1") + + // TLSServerHashSha256Key is the attribute Key conforming to the + // "tls.server.hash.sha256" semantic conventions. It represents the + // certificate fingerprint using the SHA256 digest of DER-encoded version + // of certificate offered by the server. For consistency with other hash + // values, this value should be formatted as an uppercase hash. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' + TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256") + + // TLSServerIssuerKey is the attribute Key conforming to the + // "tls.server.issuer" semantic conventions. It represents the + // distinguished name of + // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) + // of the issuer of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, + // DC=com' + TLSServerIssuerKey = attribute.Key("tls.server.issuer") + + // TLSServerJa3sKey is the attribute Key conforming to the + // "tls.server.ja3s" semantic conventions. It represents a hash that + // identifies servers based on how they perform an SSL/TLS handshake. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'd4e5b18d6b55c71272893221c96ba240' + TLSServerJa3sKey = attribute.Key("tls.server.ja3s") + + // TLSServerNotAfterKey is the attribute Key conforming to the + // "tls.server.not_after" semantic conventions. It represents the date/Time + // indicating when server certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2021-01-01T00:00:00.000Z' + TLSServerNotAfterKey = attribute.Key("tls.server.not_after") + + // TLSServerNotBeforeKey is the attribute Key conforming to the + // "tls.server.not_before" semantic conventions. It represents the + // date/Time indicating when server certificate is first considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1970-01-01T00:00:00.000Z' + TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") + + // TLSServerSubjectKey is the attribute Key conforming to the + // "tls.server.subject" semantic conventions. It represents the + // distinguished name of subject of the x.509 certificate presented by the + // server. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com' + TLSServerSubjectKey = attribute.Key("tls.server.subject") +) + +var ( + // ssl + TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") + // tls + TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") +) + +// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" +// semantic conventions. It represents the string indicating the +// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used +// during the current connection. +func TLSCipher(val string) attribute.KeyValue { + return TLSCipherKey.String(val) +} + +// TLSClientCertificate returns an attribute KeyValue conforming to the +// "tls.client.certificate" semantic conventions. It represents the pEM-encoded +// stand-alone certificate offered by the client. This is usually +// mutually-exclusive of `client.certificate_chain` since this value also +// exists in that list. +func TLSClientCertificate(val string) attribute.KeyValue { + return TLSClientCertificateKey.String(val) +} + +// TLSClientCertificateChain returns an attribute KeyValue conforming to the +// "tls.client.certificate_chain" semantic conventions. 
It represents the array +// of PEM-encoded certificates that make up the certificate chain offered by +// the client. This is usually mutually-exclusive of `client.certificate` since +// that value should be the first certificate in the chain. +func TLSClientCertificateChain(val ...string) attribute.KeyValue { + return TLSClientCertificateChainKey.StringSlice(val) +} + +// TLSClientHashMd5 returns an attribute KeyValue conforming to the +// "tls.client.hash.md5" semantic conventions. It represents the certificate +// fingerprint using the MD5 digest of DER-encoded version of certificate +// offered by the client. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashMd5(val string) attribute.KeyValue { + return TLSClientHashMd5Key.String(val) +} + +// TLSClientHashSha1 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha1" semantic conventions. It represents the certificate +// fingerprint using the SHA1 digest of DER-encoded version of certificate +// offered by the client. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha1(val string) attribute.KeyValue { + return TLSClientHashSha1Key.String(val) +} + +// TLSClientHashSha256 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha256" semantic conventions. It represents the certificate +// fingerprint using the SHA256 digest of DER-encoded version of certificate +// offered by the client. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha256(val string) attribute.KeyValue { + return TLSClientHashSha256Key.String(val) +} + +// TLSClientIssuer returns an attribute KeyValue conforming to the +// "tls.client.issuer" semantic conventions. It represents the distinguished +// name of +// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of +// the issuer of the x.509 certificate presented by the client. +func TLSClientIssuer(val string) attribute.KeyValue { + return TLSClientIssuerKey.String(val) +} + +// TLSClientJa3 returns an attribute KeyValue conforming to the +// "tls.client.ja3" semantic conventions. It represents a hash that identifies +// clients based on how they perform an SSL/TLS handshake. +func TLSClientJa3(val string) attribute.KeyValue { + return TLSClientJa3Key.String(val) +} + +// TLSClientNotAfter returns an attribute KeyValue conforming to the +// "tls.client.not_after" semantic conventions. It represents the date/Time +// indicating when client certificate is no longer considered valid. +func TLSClientNotAfter(val string) attribute.KeyValue { + return TLSClientNotAfterKey.String(val) +} + +// TLSClientNotBefore returns an attribute KeyValue conforming to the +// "tls.client.not_before" semantic conventions. It represents the date/Time +// indicating when client certificate is first considered valid. +func TLSClientNotBefore(val string) attribute.KeyValue { + return TLSClientNotBeforeKey.String(val) +} + +// TLSClientServerName returns an attribute KeyValue conforming to the +// "tls.client.server_name" semantic conventions. It represents the also called +// an SNI, this tells the server which hostname to which the client is +// attempting to connect to. 
+func TLSClientServerName(val string) attribute.KeyValue { + return TLSClientServerNameKey.String(val) +} + +// TLSClientSubject returns an attribute KeyValue conforming to the +// "tls.client.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the client. +func TLSClientSubject(val string) attribute.KeyValue { + return TLSClientSubjectKey.String(val) +} + +// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the +// "tls.client.supported_ciphers" semantic conventions. It represents the array +// of ciphers offered by the client during the client hello. +func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { + return TLSClientSupportedCiphersKey.StringSlice(val) +} + +// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" +// semantic conventions. It represents the string indicating the curve used for +// the given cipher, when applicable +func TLSCurve(val string) attribute.KeyValue { + return TLSCurveKey.String(val) +} + +// TLSEstablished returns an attribute KeyValue conforming to the +// "tls.established" semantic conventions. It represents the boolean flag +// indicating if the TLS negotiation was successful and transitioned to an +// encrypted tunnel. +func TLSEstablished(val bool) attribute.KeyValue { + return TLSEstablishedKey.Bool(val) +} + +// TLSNextProtocol returns an attribute KeyValue conforming to the +// "tls.next_protocol" semantic conventions. It represents the string +// indicating the protocol being tunneled. Per the values in the [IANA +// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), +// this string should be lower case. +func TLSNextProtocol(val string) attribute.KeyValue { + return TLSNextProtocolKey.String(val) +} + +// TLSProtocolVersion returns an attribute KeyValue conforming to the +// "tls.protocol.version" semantic conventions. It represents the numeric part +// of the version parsed from the original string of the negotiated [SSL/TLS +// protocol +// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) +func TLSProtocolVersion(val string) attribute.KeyValue { + return TLSProtocolVersionKey.String(val) +} + +// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed" +// semantic conventions. It represents the boolean flag indicating if this TLS +// connection was resumed from an existing TLS negotiation. +func TLSResumed(val bool) attribute.KeyValue { + return TLSResumedKey.Bool(val) +} + +// TLSServerCertificate returns an attribute KeyValue conforming to the +// "tls.server.certificate" semantic conventions. It represents the pEM-encoded +// stand-alone certificate offered by the server. This is usually +// mutually-exclusive of `server.certificate_chain` since this value also +// exists in that list. +func TLSServerCertificate(val string) attribute.KeyValue { + return TLSServerCertificateKey.String(val) +} + +// TLSServerCertificateChain returns an attribute KeyValue conforming to the +// "tls.server.certificate_chain" semantic conventions. It represents the array +// of PEM-encoded certificates that make up the certificate chain offered by +// the server. This is usually mutually-exclusive of `server.certificate` since +// that value should be the first certificate in the chain. 
+func TLSServerCertificateChain(val ...string) attribute.KeyValue { + return TLSServerCertificateChainKey.StringSlice(val) +} + +// TLSServerHashMd5 returns an attribute KeyValue conforming to the +// "tls.server.hash.md5" semantic conventions. It represents the certificate +// fingerprint using the MD5 digest of DER-encoded version of certificate +// offered by the server. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSServerHashMd5(val string) attribute.KeyValue { + return TLSServerHashMd5Key.String(val) +} + +// TLSServerHashSha1 returns an attribute KeyValue conforming to the +// "tls.server.hash.sha1" semantic conventions. It represents the certificate +// fingerprint using the SHA1 digest of DER-encoded version of certificate +// offered by the server. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSServerHashSha1(val string) attribute.KeyValue { + return TLSServerHashSha1Key.String(val) +} + +// TLSServerHashSha256 returns an attribute KeyValue conforming to the +// "tls.server.hash.sha256" semantic conventions. It represents the certificate +// fingerprint using the SHA256 digest of DER-encoded version of certificate +// offered by the server. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSServerHashSha256(val string) attribute.KeyValue { + return TLSServerHashSha256Key.String(val) +} + +// TLSServerIssuer returns an attribute KeyValue conforming to the +// "tls.server.issuer" semantic conventions. It represents the distinguished +// name of +// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of +// the issuer of the x.509 certificate presented by the client. +func TLSServerIssuer(val string) attribute.KeyValue { + return TLSServerIssuerKey.String(val) +} + +// TLSServerJa3s returns an attribute KeyValue conforming to the +// "tls.server.ja3s" semantic conventions. It represents a hash that identifies +// servers based on how they perform an SSL/TLS handshake. +func TLSServerJa3s(val string) attribute.KeyValue { + return TLSServerJa3sKey.String(val) +} + +// TLSServerNotAfter returns an attribute KeyValue conforming to the +// "tls.server.not_after" semantic conventions. It represents the date/Time +// indicating when server certificate is no longer considered valid. +func TLSServerNotAfter(val string) attribute.KeyValue { + return TLSServerNotAfterKey.String(val) +} + +// TLSServerNotBefore returns an attribute KeyValue conforming to the +// "tls.server.not_before" semantic conventions. It represents the date/Time +// indicating when server certificate is first considered valid. +func TLSServerNotBefore(val string) attribute.KeyValue { + return TLSServerNotBeforeKey.String(val) +} + +// TLSServerSubject returns an attribute KeyValue conforming to the +// "tls.server.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the server. +func TLSServerSubject(val string) attribute.KeyValue { + return TLSServerSubjectKey.String(val) +} + +// Attributes describing URL. +const ( + // URLDomainKey is the attribute Key conforming to the "url.domain" + // semantic conventions. It represents the domain extracted from the + // `url.full`, such as "opentelemetry.io". 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2', + // '[1080:0:0:0:8:800:200C:417A]' + // Note: In some cases a URL may refer to an IP and/or port directly, + // without a domain name. In this case, the IP address would go to the + // domain field. If the URL contains a [literal IPv6 + // address](https://www.rfc-editor.org/rfc/rfc2732#section-2) enclosed by + // `[` and `]`, the `[` and `]` characters should also be captured in the + // domain field. + URLDomainKey = attribute.Key("url.domain") + + // URLExtensionKey is the attribute Key conforming to the "url.extension" + // semantic conventions. It represents the file extension extracted from + // the `url.full`, excluding the leading dot. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'png', 'gz' + // Note: The file extension is only set if it exists, as not every url has + // a file extension. When the file name has multiple extensions + // `example.tar.gz`, only the last one should be captured `gz`, not + // `tar.gz`. + URLExtensionKey = attribute.Key("url.extension") + + // URLFragmentKey is the attribute Key conforming to the "url.fragment" + // semantic conventions. It represents the [URI + // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'SemConv' + URLFragmentKey = attribute.Key("url.fragment") + + // URLFullKey is the attribute Key conforming to the "url.full" semantic + // conventions. It represents the absolute URL describing a network + // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', + // '//localhost' + // Note: For network calls, URL usually has + // `scheme://host[:port][path][?query][#fragment]` format, where the + // fragment is not transmitted over HTTP, but if it is known, it SHOULD be + // included nevertheless. + // `url.full` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case username and + // password SHOULD be redacted and attribute's value SHOULD be + // `https://REDACTED:REDACTED@www.example.com/`. + // `url.full` SHOULD capture the absolute URL when it is available (or can + // be reconstructed). Sensitive content provided in `url.full` SHOULD be + // scrubbed when instrumentations can identify it. + URLFullKey = attribute.Key("url.full") + + // URLOriginalKey is the attribute Key conforming to the "url.original" + // semantic conventions. It represents the unmodified original URL as seen + // in the event source. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', + // 'search?q=OpenTelemetry' + // Note: In network monitoring, the observed URL may be a full URL, whereas + // in access logs, the URL is often just represented as a path. This field + // is meant to represent the URL as it was observed, complete or not. + // `url.original` might contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case password and + // username SHOULD NOT be redacted and attribute's value SHOULD remain the + // same. 
+ URLOriginalKey = attribute.Key("url.original") + + // URLPathKey is the attribute Key conforming to the "url.path" semantic + // conventions. It represents the [URI + // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/search' + // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when + // instrumentations can identify it. + URLPathKey = attribute.Key("url.path") + + // URLPortKey is the attribute Key conforming to the "url.port" semantic + // conventions. It represents the port extracted from the `url.full` + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 443 + URLPortKey = attribute.Key("url.port") + + // URLQueryKey is the attribute Key conforming to the "url.query" semantic + // conventions. It represents the [URI + // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'q=OpenTelemetry' + // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when + // instrumentations can identify it. + URLQueryKey = attribute.Key("url.query") + + // URLRegisteredDomainKey is the attribute Key conforming to the + // "url.registered_domain" semantic conventions. It represents the highest + // registered url domain, stripped of the subdomain. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'example.com', 'foo.co.uk' + // Note: This value can be determined precisely with the [public suffix + // list](http://publicsuffix.org). For example, the registered domain for + // `foo.example.com` is `example.com`. Trying to approximate this by simply + // taking the last two labels will not work well for TLDs such as `co.uk`. + URLRegisteredDomainKey = attribute.Key("url.registered_domain") + + // URLSchemeKey is the attribute Key conforming to the "url.scheme" + // semantic conventions. It represents the [URI + // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component + // identifying the used protocol. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'https', 'ftp', 'telnet' + URLSchemeKey = attribute.Key("url.scheme") + + // URLSubdomainKey is the attribute Key conforming to the "url.subdomain" + // semantic conventions. It represents the subdomain portion of a fully + // qualified domain name includes all of the names except the host name + // under the registered_domain. In a partially qualified domain, or if the + // qualification level of the full name cannot be determined, subdomain + // contains all of the names below the registered domain. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'east', 'sub2.sub1' + // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If + // the domain has multiple levels of subdomain, such as + // `sub2.sub1.example.com`, the subdomain field should contain `sub2.sub1`, + // with no trailing period. + URLSubdomainKey = attribute.Key("url.subdomain") + + // URLTemplateKey is the attribute Key conforming to the "url.template" + // semantic conventions. It represents the low-cardinality template of an + // [absolute path + // reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/users/{id}', '/users/:id', '/users?id={id}' + URLTemplateKey = attribute.Key("url.template") + + // URLTopLevelDomainKey is the attribute Key conforming to the + // "url.top_level_domain" semantic conventions. It represents the effective + // top level domain (eTLD), also known as the domain suffix, is the last + // part of the domain name. For example, the top level domain for + // example.com is `com`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com', 'co.uk' + // Note: This value can be determined precisely with the [public suffix + // list](http://publicsuffix.org). + URLTopLevelDomainKey = attribute.Key("url.top_level_domain") +) + +// URLDomain returns an attribute KeyValue conforming to the "url.domain" +// semantic conventions. It represents the domain extracted from the +// `url.full`, such as "opentelemetry.io". +func URLDomain(val string) attribute.KeyValue { + return URLDomainKey.String(val) +} + +// URLExtension returns an attribute KeyValue conforming to the +// "url.extension" semantic conventions. It represents the file extension +// extracted from the `url.full`, excluding the leading dot. +func URLExtension(val string) attribute.KeyValue { + return URLExtensionKey.String(val) +} + +// URLFragment returns an attribute KeyValue conforming to the +// "url.fragment" semantic conventions. It represents the [URI +// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component +func URLFragment(val string) attribute.KeyValue { + return URLFragmentKey.String(val) +} + +// URLFull returns an attribute KeyValue conforming to the "url.full" +// semantic conventions. It represents the absolute URL describing a network +// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) +func URLFull(val string) attribute.KeyValue { + return URLFullKey.String(val) +} + +// URLOriginal returns an attribute KeyValue conforming to the +// "url.original" semantic conventions. It represents the unmodified original +// URL as seen in the event source. +func URLOriginal(val string) attribute.KeyValue { + return URLOriginalKey.String(val) +} + +// URLPath returns an attribute KeyValue conforming to the "url.path" +// semantic conventions. It represents the [URI +// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component +func URLPath(val string) attribute.KeyValue { + return URLPathKey.String(val) +} + +// URLPort returns an attribute KeyValue conforming to the "url.port" +// semantic conventions. It represents the port extracted from the `url.full` +func URLPort(val int) attribute.KeyValue { + return URLPortKey.Int(val) +} + +// URLQuery returns an attribute KeyValue conforming to the "url.query" +// semantic conventions. It represents the [URI +// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component +func URLQuery(val string) attribute.KeyValue { + return URLQueryKey.String(val) +} + +// URLRegisteredDomain returns an attribute KeyValue conforming to the +// "url.registered_domain" semantic conventions. It represents the highest +// registered url domain, stripped of the subdomain. +func URLRegisteredDomain(val string) attribute.KeyValue { + return URLRegisteredDomainKey.String(val) +} + +// URLScheme returns an attribute KeyValue conforming to the "url.scheme" +// semantic conventions. 
It represents the [URI +// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component +// identifying the used protocol. +func URLScheme(val string) attribute.KeyValue { + return URLSchemeKey.String(val) +} + +// URLSubdomain returns an attribute KeyValue conforming to the +// "url.subdomain" semantic conventions. It represents the subdomain portion of +// a fully qualified domain name includes all of the names except the host name +// under the registered_domain. In a partially qualified domain, or if the +// qualification level of the full name cannot be determined, subdomain +// contains all of the names below the registered domain. +func URLSubdomain(val string) attribute.KeyValue { + return URLSubdomainKey.String(val) +} + +// URLTemplate returns an attribute KeyValue conforming to the +// "url.template" semantic conventions. It represents the low-cardinality +// template of an [absolute path +// reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2). +func URLTemplate(val string) attribute.KeyValue { + return URLTemplateKey.String(val) +} + +// URLTopLevelDomain returns an attribute KeyValue conforming to the +// "url.top_level_domain" semantic conventions. It represents the effective top +// level domain (eTLD), also known as the domain suffix, is the last part of +// the domain name. For example, the top level domain for example.com is `com`. +func URLTopLevelDomain(val string) attribute.KeyValue { + return URLTopLevelDomainKey.String(val) +} + +// Describes user-agent attributes. +const ( + // UserAgentNameKey is the attribute Key conforming to the + // "user_agent.name" semantic conventions. It represents the name of the + // user-agent extracted from original. Usually refers to the browser's + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Safari', 'YourApp' + // Note: [Example](https://www.whatsmyua.info) of extracting browser's name + // from original string. In the case of using a user-agent for non-browser + // products, such as microservices with multiple names/versions inside the + // `user_agent.original`, the most significant name SHOULD be selected. In + // such a scenario it should align with `user_agent.version` + UserAgentNameKey = attribute.Key("user_agent.name") + + // UserAgentOriginalKey is the attribute Key conforming to the + // "user_agent.original" semantic conventions. It represents the value of + // the [HTTP + // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) + // header sent by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU + // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) + // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0 + // grpc-java-okhttp/1.27.2' + UserAgentOriginalKey = attribute.Key("user_agent.original") + + // UserAgentVersionKey is the attribute Key conforming to the + // "user_agent.version" semantic conventions. It represents the version of + // the user-agent extracted from original. Usually refers to the browser's + // version + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.1.2', '1.0.0' + // Note: [Example](https://www.whatsmyua.info) of extracting browser's + // version from original string. 
In the case of using a user-agent for + // non-browser products, such as microservices with multiple names/versions + // inside the `user_agent.original`, the most significant version SHOULD be + // selected. In such a scenario it should align with `user_agent.name` + UserAgentVersionKey = attribute.Key("user_agent.version") +) + +// UserAgentName returns an attribute KeyValue conforming to the +// "user_agent.name" semantic conventions. It represents the name of the +// user-agent extracted from original. Usually refers to the browser's name. +func UserAgentName(val string) attribute.KeyValue { + return UserAgentNameKey.String(val) +} + +// UserAgentOriginal returns an attribute KeyValue conforming to the +// "user_agent.original" semantic conventions. It represents the value of the +// [HTTP +// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) +// header sent by the client. +func UserAgentOriginal(val string) attribute.KeyValue { + return UserAgentOriginalKey.String(val) +} + +// UserAgentVersion returns an attribute KeyValue conforming to the +// "user_agent.version" semantic conventions. It represents the version of the +// user-agent extracted from original. Usually refers to the browser's version +func UserAgentVersion(val string) attribute.KeyValue { + return UserAgentVersionKey.String(val) +} + +// The attributes used to describe the packaged software running the +// application code. +const ( + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") + + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") +) + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. +func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the +// web engine. 
+func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go similarity index 96% rename from vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go index d27e8a8f8b3..d031bbea784 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go @@ -4,6 +4,6 @@ // Package semconv implements OpenTelemetry semantic conventions. // // OpenTelemetry semantic conventions are agreed standardized naming -// patterns for OpenTelemetry things. This package represents the v1.24.0 +// patterns for OpenTelemetry things. This package represents the v1.26.0 // version of the OpenTelemetry semantic conventions. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go similarity index 98% rename from vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go index 7235bb51d9a..bfaee0d56e3 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" const ( // ExceptionEventName is the name of the Span event representing an exception. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go similarity index 77% rename from vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go index a6b953f625e..fcdb9f48596 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go @@ -3,109 +3,259 @@ // Code generated from semantic convention specification. DO NOT EDIT. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" const ( - // DBClientConnectionsUsage is the metric conforming to the - // "db.client.connections.usage" semantic conventions. It represents the number + // ContainerCPUTime is the metric conforming to the "container.cpu.time" + // semantic conventions. It represents the total CPU time consumed. + // Instrument: counter + // Unit: s + // Stability: Experimental + ContainerCPUTimeName = "container.cpu.time" + ContainerCPUTimeUnit = "s" + ContainerCPUTimeDescription = "Total CPU time consumed" + + // ContainerMemoryUsage is the metric conforming to the + // "container.memory.usage" semantic conventions. It represents the memory + // usage of the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerMemoryUsageName = "container.memory.usage" + ContainerMemoryUsageUnit = "By" + ContainerMemoryUsageDescription = "Memory usage of the container." + + // ContainerDiskIo is the metric conforming to the "container.disk.io" semantic + // conventions. 
It represents the disk bytes for the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerDiskIoName = "container.disk.io" + ContainerDiskIoUnit = "By" + ContainerDiskIoDescription = "Disk bytes for the container." + + // ContainerNetworkIo is the metric conforming to the "container.network.io" + // semantic conventions. It represents the network bytes for the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerNetworkIoName = "container.network.io" + ContainerNetworkIoUnit = "By" + ContainerNetworkIoDescription = "Network bytes for the container." + + // DBClientOperationDuration is the metric conforming to the + // "db.client.operation.duration" semantic conventions. It represents the + // duration of database client operations. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientOperationDurationName = "db.client.operation.duration" + DBClientOperationDurationUnit = "s" + DBClientOperationDurationDescription = "Duration of database client operations." + + // DBClientConnectionCount is the metric conforming to the + // "db.client.connection.count" semantic conventions. It represents the number // of connections that are currently in state described by the `state` // attribute. // Instrument: updowncounter // Unit: {connection} // Stability: Experimental + DBClientConnectionCountName = "db.client.connection.count" + DBClientConnectionCountUnit = "{connection}" + DBClientConnectionCountDescription = "The number of connections that are currently in state described by the `state` attribute" + + // DBClientConnectionIdleMax is the metric conforming to the + // "db.client.connection.idle.max" semantic conventions. It represents the + // maximum number of idle open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionIdleMaxName = "db.client.connection.idle.max" + DBClientConnectionIdleMaxUnit = "{connection}" + DBClientConnectionIdleMaxDescription = "The maximum number of idle open connections allowed" + + // DBClientConnectionIdleMin is the metric conforming to the + // "db.client.connection.idle.min" semantic conventions. It represents the + // minimum number of idle open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionIdleMinName = "db.client.connection.idle.min" + DBClientConnectionIdleMinUnit = "{connection}" + DBClientConnectionIdleMinDescription = "The minimum number of idle open connections allowed" + + // DBClientConnectionMax is the metric conforming to the + // "db.client.connection.max" semantic conventions. It represents the maximum + // number of open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionMaxName = "db.client.connection.max" + DBClientConnectionMaxUnit = "{connection}" + DBClientConnectionMaxDescription = "The maximum number of open connections allowed" + + // DBClientConnectionPendingRequests is the metric conforming to the + // "db.client.connection.pending_requests" semantic conventions. It represents + // the number of pending requests for an open connection, cumulative for the + // entire pool. 
+ // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + DBClientConnectionPendingRequestsName = "db.client.connection.pending_requests" + DBClientConnectionPendingRequestsUnit = "{request}" + DBClientConnectionPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool" + + // DBClientConnectionTimeouts is the metric conforming to the + // "db.client.connection.timeouts" semantic conventions. It represents the + // number of connection timeouts that have occurred trying to obtain a + // connection from the pool. + // Instrument: counter + // Unit: {timeout} + // Stability: Experimental + DBClientConnectionTimeoutsName = "db.client.connection.timeouts" + DBClientConnectionTimeoutsUnit = "{timeout}" + DBClientConnectionTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool" + + // DBClientConnectionCreateTime is the metric conforming to the + // "db.client.connection.create_time" semantic conventions. It represents the + // time it took to create a new connection. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientConnectionCreateTimeName = "db.client.connection.create_time" + DBClientConnectionCreateTimeUnit = "s" + DBClientConnectionCreateTimeDescription = "The time it took to create a new connection" + + // DBClientConnectionWaitTime is the metric conforming to the + // "db.client.connection.wait_time" semantic conventions. It represents the + // time it took to obtain an open connection from the pool. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientConnectionWaitTimeName = "db.client.connection.wait_time" + DBClientConnectionWaitTimeUnit = "s" + DBClientConnectionWaitTimeDescription = "The time it took to obtain an open connection from the pool" + + // DBClientConnectionUseTime is the metric conforming to the + // "db.client.connection.use_time" semantic conventions. It represents the time + // between borrowing a connection and returning it to the pool. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientConnectionUseTimeName = "db.client.connection.use_time" + DBClientConnectionUseTimeUnit = "s" + DBClientConnectionUseTimeDescription = "The time between borrowing a connection and returning it to the pool" + + // DBClientConnectionsUsage is the metric conforming to the + // "db.client.connections.usage" semantic conventions. It represents the + // deprecated, use `db.client.connection.count` instead. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental DBClientConnectionsUsageName = "db.client.connections.usage" DBClientConnectionsUsageUnit = "{connection}" - DBClientConnectionsUsageDescription = "The number of connections that are currently in state described by the `state` attribute" + DBClientConnectionsUsageDescription = "Deprecated, use `db.client.connection.count` instead." // DBClientConnectionsIdleMax is the metric conforming to the // "db.client.connections.idle.max" semantic conventions. It represents the - // maximum number of idle open connections allowed. + // deprecated, use `db.client.connection.idle.max` instead. 
// Instrument: updowncounter // Unit: {connection} // Stability: Experimental DBClientConnectionsIdleMaxName = "db.client.connections.idle.max" DBClientConnectionsIdleMaxUnit = "{connection}" - DBClientConnectionsIdleMaxDescription = "The maximum number of idle open connections allowed" + DBClientConnectionsIdleMaxDescription = "Deprecated, use `db.client.connection.idle.max` instead." // DBClientConnectionsIdleMin is the metric conforming to the // "db.client.connections.idle.min" semantic conventions. It represents the - // minimum number of idle open connections allowed. + // deprecated, use `db.client.connection.idle.min` instead. // Instrument: updowncounter // Unit: {connection} // Stability: Experimental DBClientConnectionsIdleMinName = "db.client.connections.idle.min" DBClientConnectionsIdleMinUnit = "{connection}" - DBClientConnectionsIdleMinDescription = "The minimum number of idle open connections allowed" + DBClientConnectionsIdleMinDescription = "Deprecated, use `db.client.connection.idle.min` instead." // DBClientConnectionsMax is the metric conforming to the - // "db.client.connections.max" semantic conventions. It represents the maximum - // number of open connections allowed. + // "db.client.connections.max" semantic conventions. It represents the + // deprecated, use `db.client.connection.max` instead. // Instrument: updowncounter // Unit: {connection} // Stability: Experimental DBClientConnectionsMaxName = "db.client.connections.max" DBClientConnectionsMaxUnit = "{connection}" - DBClientConnectionsMaxDescription = "The maximum number of open connections allowed" + DBClientConnectionsMaxDescription = "Deprecated, use `db.client.connection.max` instead." // DBClientConnectionsPendingRequests is the metric conforming to the // "db.client.connections.pending_requests" semantic conventions. It represents - // the number of pending requests for an open connection, cumulative for the - // entire pool. + // the deprecated, use `db.client.connection.pending_requests` instead. // Instrument: updowncounter // Unit: {request} // Stability: Experimental DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests" DBClientConnectionsPendingRequestsUnit = "{request}" - DBClientConnectionsPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool" + DBClientConnectionsPendingRequestsDescription = "Deprecated, use `db.client.connection.pending_requests` instead." // DBClientConnectionsTimeouts is the metric conforming to the // "db.client.connections.timeouts" semantic conventions. It represents the - // number of connection timeouts that have occurred trying to obtain a - // connection from the pool. + // deprecated, use `db.client.connection.timeouts` instead. // Instrument: counter // Unit: {timeout} // Stability: Experimental DBClientConnectionsTimeoutsName = "db.client.connections.timeouts" DBClientConnectionsTimeoutsUnit = "{timeout}" - DBClientConnectionsTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool" + DBClientConnectionsTimeoutsDescription = "Deprecated, use `db.client.connection.timeouts` instead." // DBClientConnectionsCreateTime is the metric conforming to the // "db.client.connections.create_time" semantic conventions. It represents the - // time it took to create a new connection. + // deprecated, use `db.client.connection.create_time` instead. Note: the unit + // also changed from `ms` to `s`. 
// Instrument: histogram // Unit: ms // Stability: Experimental DBClientConnectionsCreateTimeName = "db.client.connections.create_time" DBClientConnectionsCreateTimeUnit = "ms" - DBClientConnectionsCreateTimeDescription = "The time it took to create a new connection" + DBClientConnectionsCreateTimeDescription = "Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`." // DBClientConnectionsWaitTime is the metric conforming to the // "db.client.connections.wait_time" semantic conventions. It represents the - // time it took to obtain an open connection from the pool. + // deprecated, use `db.client.connection.wait_time` instead. Note: the unit + // also changed from `ms` to `s`. // Instrument: histogram // Unit: ms // Stability: Experimental DBClientConnectionsWaitTimeName = "db.client.connections.wait_time" DBClientConnectionsWaitTimeUnit = "ms" - DBClientConnectionsWaitTimeDescription = "The time it took to obtain an open connection from the pool" + DBClientConnectionsWaitTimeDescription = "Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`." // DBClientConnectionsUseTime is the metric conforming to the // "db.client.connections.use_time" semantic conventions. It represents the - // time between borrowing a connection and returning it to the pool. + // deprecated, use `db.client.connection.use_time` instead. Note: the unit also + // changed from `ms` to `s`. // Instrument: histogram // Unit: ms // Stability: Experimental DBClientConnectionsUseTimeName = "db.client.connections.use_time" DBClientConnectionsUseTimeUnit = "ms" - DBClientConnectionsUseTimeDescription = "The time between borrowing a connection and returning it to the pool" + DBClientConnectionsUseTimeDescription = "Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`." + + // DNSLookupDuration is the metric conforming to the "dns.lookup.duration" + // semantic conventions. It represents the measures the time taken to perform a + // DNS lookup. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DNSLookupDurationName = "dns.lookup.duration" + DNSLookupDurationUnit = "s" + DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup." // AspnetcoreRoutingMatchAttempts is the metric conforming to the // "aspnetcore.routing.match_attempts" semantic conventions. It represents the // number of requests that were attempted to be matched to an endpoint. // Instrument: counter // Unit: {match_attempt} - // Stability: Experimental + // Stability: Stable AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts" AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}" AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint." @@ -115,7 +265,7 @@ const ( // number of exceptions caught by exception handling middleware. // Instrument: counter // Unit: {exception} - // Stability: Experimental + // Stability: Stable AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions" AspnetcoreDiagnosticsExceptionsUnit = "{exception}" AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware." @@ -126,7 +276,7 @@ const ( // that hold a rate limiting lease. 
// Instrument: updowncounter // Unit: {request} - // Stability: Experimental + // Stability: Stable AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases" AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}" AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease." @@ -137,7 +287,7 @@ const ( // server. // Instrument: histogram // Unit: s - // Stability: Experimental + // Stability: Stable AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration" AspnetcoreRateLimitingRequestLeaseDurationUnit = "s" AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server." @@ -148,7 +298,7 @@ const ( // limiting lease. // Instrument: histogram // Unit: s - // Stability: Experimental + // Stability: Stable AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue" AspnetcoreRateLimitingRequestTimeInQueueUnit = "s" AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease." @@ -159,7 +309,7 @@ const ( // acquire a rate limiting lease. // Instrument: updowncounter // Unit: {request} - // Stability: Experimental + // Stability: Stable AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests" AspnetcoreRateLimitingQueuedRequestsUnit = "{request}" AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease." @@ -169,69 +319,17 @@ const ( // number of requests that tried to acquire a rate limiting lease. // Instrument: counter // Unit: {request} - // Stability: Experimental + // Stability: Stable AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests" AspnetcoreRateLimitingRequestsUnit = "{request}" AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease." - // DNSLookupDuration is the metric conforming to the "dns.lookup.duration" - // semantic conventions. It represents the measures the time taken to perform a - // DNS lookup. - // Instrument: histogram - // Unit: s - // Stability: Experimental - DNSLookupDurationName = "dns.lookup.duration" - DNSLookupDurationUnit = "s" - DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup." - - // HTTPClientOpenConnections is the metric conforming to the - // "http.client.open_connections" semantic conventions. It represents the - // number of outbound HTTP connections that are currently active or idle on the - // client. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - HTTPClientOpenConnectionsName = "http.client.open_connections" - HTTPClientOpenConnectionsUnit = "{connection}" - HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client." - - // HTTPClientConnectionDuration is the metric conforming to the - // "http.client.connection.duration" semantic conventions. It represents the - // duration of the successfully established outbound HTTP connections. 
- // Instrument: histogram - // Unit: s - // Stability: Experimental - HTTPClientConnectionDurationName = "http.client.connection.duration" - HTTPClientConnectionDurationUnit = "s" - HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections." - - // HTTPClientActiveRequests is the metric conforming to the - // "http.client.active_requests" semantic conventions. It represents the number - // of active HTTP requests. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - HTTPClientActiveRequestsName = "http.client.active_requests" - HTTPClientActiveRequestsUnit = "{request}" - HTTPClientActiveRequestsDescription = "Number of active HTTP requests." - - // HTTPClientRequestTimeInQueue is the metric conforming to the - // "http.client.request.time_in_queue" semantic conventions. It represents the - // amount of time requests spent on a queue waiting for an available - // connection. - // Instrument: histogram - // Unit: s - // Stability: Experimental - HTTPClientRequestTimeInQueueName = "http.client.request.time_in_queue" - HTTPClientRequestTimeInQueueUnit = "s" - HTTPClientRequestTimeInQueueDescription = "The amount of time requests spent on a queue waiting for an available connection." - // KestrelActiveConnections is the metric conforming to the // "kestrel.active_connections" semantic conventions. It represents the number // of connections that are currently active on the server. // Instrument: updowncounter // Unit: {connection} - // Stability: Experimental + // Stability: Stable KestrelActiveConnectionsName = "kestrel.active_connections" KestrelActiveConnectionsUnit = "{connection}" KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server." @@ -241,7 +339,7 @@ const ( // duration of connections on the server. // Instrument: histogram // Unit: s - // Stability: Experimental + // Stability: Stable KestrelConnectionDurationName = "kestrel.connection.duration" KestrelConnectionDurationUnit = "s" KestrelConnectionDurationDescription = "The duration of connections on the server." @@ -251,7 +349,7 @@ const ( // number of connections rejected by the server. // Instrument: counter // Unit: {connection} - // Stability: Experimental + // Stability: Stable KestrelRejectedConnectionsName = "kestrel.rejected_connections" KestrelRejectedConnectionsUnit = "{connection}" KestrelRejectedConnectionsDescription = "Number of connections rejected by the server." @@ -261,7 +359,7 @@ const ( // of connections that are currently queued and are waiting to start. // Instrument: updowncounter // Unit: {connection} - // Stability: Experimental + // Stability: Stable KestrelQueuedConnectionsName = "kestrel.queued_connections" KestrelQueuedConnectionsUnit = "{connection}" KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start." @@ -272,7 +370,7 @@ const ( // currently queued and are waiting to start. // Instrument: updowncounter // Unit: {request} - // Stability: Experimental + // Stability: Stable KestrelQueuedRequestsName = "kestrel.queued_requests" KestrelQueuedRequestsUnit = "{request}" KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start." @@ -282,7 +380,7 @@ const ( // number of connections that are currently upgraded (WebSockets). . 
// Instrument: updowncounter // Unit: {connection} - // Stability: Experimental + // Stability: Stable KestrelUpgradedConnectionsName = "kestrel.upgraded_connections" KestrelUpgradedConnectionsUnit = "{connection}" KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets). ." @@ -292,7 +390,7 @@ const ( // duration of TLS handshakes on the server. // Instrument: histogram // Unit: s - // Stability: Experimental + // Stability: Stable KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration" KestrelTLSHandshakeDurationUnit = "s" KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server." @@ -302,7 +400,7 @@ const ( // number of TLS handshakes that are currently in progress on the server. // Instrument: updowncounter // Unit: {handshake} - // Stability: Experimental + // Stability: Stable KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes" KestrelActiveTLSHandshakesUnit = "{handshake}" KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server." @@ -312,7 +410,7 @@ const ( // duration of connections on the server. // Instrument: histogram // Unit: s - // Stability: Experimental + // Stability: Stable SignalrServerConnectionDurationName = "signalr.server.connection.duration" SignalrServerConnectionDurationUnit = "s" SignalrServerConnectionDurationDescription = "The duration of connections on the server." @@ -322,7 +420,7 @@ const ( // number of connections that are currently active on the server. // Instrument: updowncounter // Unit: {connection} - // Stability: Experimental + // Stability: Stable SignalrServerActiveConnectionsName = "signalr.server.active_connections" SignalrServerActiveConnectionsUnit = "{connection}" SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server." @@ -481,6 +579,37 @@ const ( HTTPClientResponseBodySizeUnit = "By" HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies." + // HTTPClientOpenConnections is the metric conforming to the + // "http.client.open_connections" semantic conventions. It represents the + // number of outbound HTTP connections that are currently active or idle on the + // client. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + HTTPClientOpenConnectionsName = "http.client.open_connections" + HTTPClientOpenConnectionsUnit = "{connection}" + HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client." + + // HTTPClientConnectionDuration is the metric conforming to the + // "http.client.connection.duration" semantic conventions. It represents the + // duration of the successfully established outbound HTTP connections. + // Instrument: histogram + // Unit: s + // Stability: Experimental + HTTPClientConnectionDurationName = "http.client.connection.duration" + HTTPClientConnectionDurationUnit = "s" + HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections." + + // HTTPClientActiveRequests is the metric conforming to the + // "http.client.active_requests" semantic conventions. It represents the number + // of active HTTP requests. 
+ // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + HTTPClientActiveRequestsName = "http.client.active_requests" + HTTPClientActiveRequestsUnit = "{request}" + HTTPClientActiveRequestsDescription = "Number of active HTTP requests." + // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic // conventions. It represents the measure of initial memory requested. // Instrument: updowncounter @@ -673,15 +802,15 @@ const ( MessagingReceiveDurationUnit = "s" MessagingReceiveDurationDescription = "Measures the duration of receive operation." - // MessagingDeliverDuration is the metric conforming to the - // "messaging.deliver.duration" semantic conventions. It represents the - // measures the duration of deliver operation. + // MessagingProcessDuration is the metric conforming to the + // "messaging.process.duration" semantic conventions. It represents the + // measures the duration of process operation. // Instrument: histogram // Unit: s // Stability: Experimental - MessagingDeliverDurationName = "messaging.deliver.duration" - MessagingDeliverDurationUnit = "s" - MessagingDeliverDurationDescription = "Measures the duration of deliver operation." + MessagingProcessDurationName = "messaging.process.duration" + MessagingProcessDurationUnit = "s" + MessagingProcessDurationDescription = "Measures the duration of process operation." // MessagingPublishMessages is the metric conforming to the // "messaging.publish.messages" semantic conventions. It represents the @@ -703,15 +832,112 @@ const ( MessagingReceiveMessagesUnit = "{message}" MessagingReceiveMessagesDescription = "Measures the number of received messages." - // MessagingDeliverMessages is the metric conforming to the - // "messaging.deliver.messages" semantic conventions. It represents the - // measures the number of delivered messages. + // MessagingProcessMessages is the metric conforming to the + // "messaging.process.messages" semantic conventions. It represents the + // measures the number of processed messages. // Instrument: counter // Unit: {message} // Stability: Experimental - MessagingDeliverMessagesName = "messaging.deliver.messages" - MessagingDeliverMessagesUnit = "{message}" - MessagingDeliverMessagesDescription = "Measures the number of delivered messages." + MessagingProcessMessagesName = "messaging.process.messages" + MessagingProcessMessagesUnit = "{message}" + MessagingProcessMessagesDescription = "Measures the number of processed messages." + + // ProcessCPUTime is the metric conforming to the "process.cpu.time" semantic + // conventions. It represents the total CPU seconds broken down by different + // states. + // Instrument: counter + // Unit: s + // Stability: Experimental + ProcessCPUTimeName = "process.cpu.time" + ProcessCPUTimeUnit = "s" + ProcessCPUTimeDescription = "Total CPU seconds broken down by different states." + + // ProcessCPUUtilization is the metric conforming to the + // "process.cpu.utilization" semantic conventions. It represents the difference + // in process.cpu.time since the last measurement, divided by the elapsed time + // and number of CPUs available to the process. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + ProcessCPUUtilizationName = "process.cpu.utilization" + ProcessCPUUtilizationUnit = "1" + ProcessCPUUtilizationDescription = "Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process." 
+ + // ProcessMemoryUsage is the metric conforming to the "process.memory.usage" + // semantic conventions. It represents the amount of physical memory in use. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + ProcessMemoryUsageName = "process.memory.usage" + ProcessMemoryUsageUnit = "By" + ProcessMemoryUsageDescription = "The amount of physical memory in use." + + // ProcessMemoryVirtual is the metric conforming to the + // "process.memory.virtual" semantic conventions. It represents the amount of + // committed virtual memory. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + ProcessMemoryVirtualName = "process.memory.virtual" + ProcessMemoryVirtualUnit = "By" + ProcessMemoryVirtualDescription = "The amount of committed virtual memory." + + // ProcessDiskIo is the metric conforming to the "process.disk.io" semantic + // conventions. It represents the disk bytes transferred. + // Instrument: counter + // Unit: By + // Stability: Experimental + ProcessDiskIoName = "process.disk.io" + ProcessDiskIoUnit = "By" + ProcessDiskIoDescription = "Disk bytes transferred." + + // ProcessNetworkIo is the metric conforming to the "process.network.io" + // semantic conventions. It represents the network bytes transferred. + // Instrument: counter + // Unit: By + // Stability: Experimental + ProcessNetworkIoName = "process.network.io" + ProcessNetworkIoUnit = "By" + ProcessNetworkIoDescription = "Network bytes transferred." + + // ProcessThreadCount is the metric conforming to the "process.thread.count" + // semantic conventions. It represents the process threads count. + // Instrument: updowncounter + // Unit: {thread} + // Stability: Experimental + ProcessThreadCountName = "process.thread.count" + ProcessThreadCountUnit = "{thread}" + ProcessThreadCountDescription = "Process threads count." + + // ProcessOpenFileDescriptorCount is the metric conforming to the + // "process.open_file_descriptor.count" semantic conventions. It represents the + // number of file descriptors in use by the process. + // Instrument: updowncounter + // Unit: {count} + // Stability: Experimental + ProcessOpenFileDescriptorCountName = "process.open_file_descriptor.count" + ProcessOpenFileDescriptorCountUnit = "{count}" + ProcessOpenFileDescriptorCountDescription = "Number of file descriptors in use by the process." + + // ProcessContextSwitches is the metric conforming to the + // "process.context_switches" semantic conventions. It represents the number of + // times the process has been context switched. + // Instrument: counter + // Unit: {count} + // Stability: Experimental + ProcessContextSwitchesName = "process.context_switches" + ProcessContextSwitchesUnit = "{count}" + ProcessContextSwitchesDescription = "Number of times the process has been context switched." + + // ProcessPagingFaults is the metric conforming to the "process.paging.faults" + // semantic conventions. It represents the number of page faults the process + // has made. + // Instrument: counter + // Unit: {fault} + // Stability: Experimental + ProcessPagingFaultsName = "process.paging.faults" + ProcessPagingFaultsUnit = "{fault}" + ProcessPagingFaultsDescription = "Number of page faults the process has made." // RPCServerDuration is the metric conforming to the "rpc.server.duration" // semantic conventions. It represents the measures the duration of inbound @@ -883,6 +1109,16 @@ const ( SystemMemoryLimitUnit = "By" SystemMemoryLimitDescription = "Total memory available in the system." 
+ // SystemMemoryShared is the metric conforming to the "system.memory.shared" + // semantic conventions. It represents the shared memory used (mostly by + // tmpfs). + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemMemorySharedName = "system.memory.shared" + SystemMemorySharedUnit = "By" + SystemMemorySharedDescription = "Shared memory used (mostly by tmpfs)." + // SystemMemoryUtilization is the metric conforming to the // "system.memory.utilization" semantic conventions. // Instrument: gauge @@ -1038,25 +1274,25 @@ const ( SystemNetworkConnectionsName = "system.network.connections" SystemNetworkConnectionsUnit = "{connection}" - // SystemProcessesCount is the metric conforming to the - // "system.processes.count" semantic conventions. It represents the total - // number of processes in each state. + // SystemProcessCount is the metric conforming to the "system.process.count" + // semantic conventions. It represents the total number of processes in each + // state. // Instrument: updowncounter // Unit: {process} // Stability: Experimental - SystemProcessesCountName = "system.processes.count" - SystemProcessesCountUnit = "{process}" - SystemProcessesCountDescription = "Total number of processes in each state" + SystemProcessCountName = "system.process.count" + SystemProcessCountUnit = "{process}" + SystemProcessCountDescription = "Total number of processes in each state" - // SystemProcessesCreated is the metric conforming to the - // "system.processes.created" semantic conventions. It represents the total + // SystemProcessCreated is the metric conforming to the + // "system.process.created" semantic conventions. It represents the total // number of processes created over uptime of the host. // Instrument: counter // Unit: {process} // Stability: Experimental - SystemProcessesCreatedName = "system.processes.created" - SystemProcessesCreatedUnit = "{process}" - SystemProcessesCreatedDescription = "Total number of processes created over uptime of the host" + SystemProcessCreatedName = "system.process.created" + SystemProcessCreatedUnit = "{process}" + SystemProcessCreatedDescription = "Total number of processes created over uptime of the host" // SystemLinuxMemoryAvailable is the metric conforming to the // "system.linux.memory.available" semantic conventions. It represents an diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go similarity index 85% rename from vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go index fe80b1731d0..4c87c7adcc7 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go @@ -1,9 +1,9 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" // SchemaURL is the schema URL that matches the version of the semantic conventions // that this package defines. 
Semconv packages starting from v1.4.0 must declare // non-empty schema URL in the form https://opentelemetry.io/schemas/ -const SchemaURL = "https://opentelemetry.io/schemas/1.24.0" +const SchemaURL = "https://opentelemetry.io/schemas/1.26.0" diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go index 1dfa52c5216..64a4f1b362f 100644 --- a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go +++ b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go @@ -67,11 +67,13 @@ func (t Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) span = Span{sc: sc} } else { // No parent, return a No-Op span with an empty span context. - span = Span{} + span = noopSpanInstance } return trace.ContextWithSpan(ctx, span), span } +var noopSpanInstance trace.Span = Span{} + // Span is an OpenTelemetry No-Op Span. type Span struct { embedded.Span diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index ef97d30f9f5..ab28960524b 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.26.0" + return "1.28.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index ecd2734e0eb..241cfc82a8d 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,7 +3,7 @@ module-sets: stable-v1: - version: v1.26.0 + version: v1.28.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus @@ -29,12 +29,12 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.48.0 + version: v0.50.0 modules: - go.opentelemetry.io/otel/example/prometheus - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.2.0-alpha + version: v0.4.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/sdk/log @@ -46,3 +46,4 @@ module-sets: - go.opentelemetry.io/otel/schema excluded-modules: - go.opentelemetry.io/otel/internal/tools + - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc diff --git a/vendor/golang.org/x/crypto/cast5/cast5.go b/vendor/golang.org/x/crypto/cast5/cast5.go index 425e8eecb06..016e90215cd 100644 --- a/vendor/golang.org/x/crypto/cast5/cast5.go +++ b/vendor/golang.org/x/crypto/cast5/cast5.go @@ -11,7 +11,7 @@ // Deprecated: any new system should use AES (from crypto/aes, if necessary in // an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from // golang.org/x/crypto/chacha20poly1305). -package cast5 // import "golang.org/x/crypto/cast5" +package cast5 import ( "errors" diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go index 93da7322bc4..8cf5d8112e4 100644 --- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go @@ -5,7 +5,7 @@ // Package chacha20poly1305 implements the ChaCha20-Poly1305 AEAD and its // extended nonce variant XChaCha20-Poly1305, as specified in RFC 8439 and // draft-irtf-cfrg-xchacha-01. 
-package chacha20poly1305 // import "golang.org/x/crypto/chacha20poly1305" +package chacha20poly1305 import ( "crypto/cipher" diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go index cda8e3edfd5..90ef6a241de 100644 --- a/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go +++ b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go @@ -4,7 +4,7 @@ // Package asn1 contains supporting types for parsing and building ASN.1 // messages with the cryptobyte package. -package asn1 // import "golang.org/x/crypto/cryptobyte/asn1" +package asn1 // Tag represents an ASN.1 identifier octet, consisting of a tag number // (indicating a type) and class (such as context-specific or constructed). diff --git a/vendor/golang.org/x/crypto/cryptobyte/string.go b/vendor/golang.org/x/crypto/cryptobyte/string.go index 10692a8a315..4b0f8097f9e 100644 --- a/vendor/golang.org/x/crypto/cryptobyte/string.go +++ b/vendor/golang.org/x/crypto/cryptobyte/string.go @@ -15,7 +15,7 @@ // // See the documentation and examples for the Builder and String types to get // started. -package cryptobyte // import "golang.org/x/crypto/cryptobyte" +package cryptobyte // String represents a string of bytes. It provides methods for parsing // fixed-length and length-prefixed values from it. diff --git a/vendor/golang.org/x/crypto/hkdf/hkdf.go b/vendor/golang.org/x/crypto/hkdf/hkdf.go index f4ded5fee2f..3bee66294ec 100644 --- a/vendor/golang.org/x/crypto/hkdf/hkdf.go +++ b/vendor/golang.org/x/crypto/hkdf/hkdf.go @@ -8,7 +8,7 @@ // HKDF is a cryptographic key derivation function (KDF) with the goal of // expanding limited input keying material into one or more cryptographically // strong secret keys. -package hkdf // import "golang.org/x/crypto/hkdf" +package hkdf import ( "crypto/hmac" diff --git a/vendor/golang.org/x/crypto/md4/md4.go b/vendor/golang.org/x/crypto/md4/md4.go index d1911c2e86e..7d9281e0259 100644 --- a/vendor/golang.org/x/crypto/md4/md4.go +++ b/vendor/golang.org/x/crypto/md4/md4.go @@ -7,7 +7,7 @@ // Deprecated: MD4 is cryptographically broken and should only be used // where compatibility with legacy systems, not security, is the goal. Instead, // use a secure hash like SHA-256 (from crypto/sha256). -package md4 // import "golang.org/x/crypto/md4" +package md4 import ( "crypto" diff --git a/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go index f3c3242a047..1fe600ad034 100644 --- a/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go +++ b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go @@ -32,7 +32,7 @@ chunk size. This package is interoperable with NaCl: https://nacl.cr.yp.to/secretbox.html. */ -package secretbox // import "golang.org/x/crypto/nacl/secretbox" +package secretbox import ( "golang.org/x/crypto/internal/alias" diff --git a/vendor/golang.org/x/crypto/ocsp/ocsp.go b/vendor/golang.org/x/crypto/ocsp/ocsp.go index bf2259537d2..e6c645e7ceb 100644 --- a/vendor/golang.org/x/crypto/ocsp/ocsp.go +++ b/vendor/golang.org/x/crypto/ocsp/ocsp.go @@ -5,7 +5,7 @@ // Package ocsp parses OCSP responses as specified in RFC 2560. OCSP responses // are signed messages attesting to the validity of a certificate for a small // period of time. This is used to manage revocation for X.509 certificates. 
-package ocsp // import "golang.org/x/crypto/ocsp" +package ocsp import ( "crypto" diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go index 904b57e01d7..28cd99c7f3f 100644 --- a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go +++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go @@ -16,7 +16,7 @@ Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To choose, you can pass the `New` functions from the different SHA packages to pbkdf2.Key. */ -package pbkdf2 // import "golang.org/x/crypto/pbkdf2" +package pbkdf2 import ( "crypto/hmac" diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go b/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go index 3fd05b27516..3685b344587 100644 --- a/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go +++ b/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Package salsa provides low-level access to functions in the Salsa family. -package salsa // import "golang.org/x/crypto/salsa20/salsa" +package salsa import "math/bits" diff --git a/vendor/golang.org/x/crypto/scrypt/scrypt.go b/vendor/golang.org/x/crypto/scrypt/scrypt.go index c971a99fa67..76fa40fb20a 100644 --- a/vendor/golang.org/x/crypto/scrypt/scrypt.go +++ b/vendor/golang.org/x/crypto/scrypt/scrypt.go @@ -5,7 +5,7 @@ // Package scrypt implements the scrypt key derivation function as defined in // Colin Percival's paper "Stronger Key Derivation via Sequential Memory-Hard // Functions" (https://www.tarsnap.com/scrypt/scrypt.pdf). -package scrypt // import "golang.org/x/crypto/scrypt" +package scrypt import ( "crypto/sha256" diff --git a/vendor/golang.org/x/crypto/sha3/doc.go b/vendor/golang.org/x/crypto/sha3/doc.go index decd8cf9bf7..7e023090707 100644 --- a/vendor/golang.org/x/crypto/sha3/doc.go +++ b/vendor/golang.org/x/crypto/sha3/doc.go @@ -59,4 +59,4 @@ // They produce output of the same length, with the same security strengths // against all attacks. This means, in particular, that SHA3-256 only has // 128-bit collision resistance, because its output length is 32 bytes. -package sha3 // import "golang.org/x/crypto/sha3" +package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/hashes.go b/vendor/golang.org/x/crypto/sha3/hashes.go index 0d8043fd2a1..c544b29e5f2 100644 --- a/vendor/golang.org/x/crypto/sha3/hashes.go +++ b/vendor/golang.org/x/crypto/sha3/hashes.go @@ -9,6 +9,7 @@ package sha3 // bytes. import ( + "crypto" "hash" ) @@ -16,39 +17,50 @@ import ( // Its generic security strength is 224 bits against preimage attacks, // and 112 bits against collision attacks. func New224() hash.Hash { - if h := new224Asm(); h != nil { - return h - } - return &state{rate: 144, outputLen: 28, dsbyte: 0x06} + return new224() } // New256 creates a new SHA3-256 hash. // Its generic security strength is 256 bits against preimage attacks, // and 128 bits against collision attacks. func New256() hash.Hash { - if h := new256Asm(); h != nil { - return h - } - return &state{rate: 136, outputLen: 32, dsbyte: 0x06} + return new256() } // New384 creates a new SHA3-384 hash. // Its generic security strength is 384 bits against preimage attacks, // and 192 bits against collision attacks. func New384() hash.Hash { - if h := new384Asm(); h != nil { - return h - } - return &state{rate: 104, outputLen: 48, dsbyte: 0x06} + return new384() } // New512 creates a new SHA3-512 hash. 
// Its generic security strength is 512 bits against preimage attacks, // and 256 bits against collision attacks. func New512() hash.Hash { - if h := new512Asm(); h != nil { - return h - } + return new512() +} + +func init() { + crypto.RegisterHash(crypto.SHA3_224, New224) + crypto.RegisterHash(crypto.SHA3_256, New256) + crypto.RegisterHash(crypto.SHA3_384, New384) + crypto.RegisterHash(crypto.SHA3_512, New512) +} + +func new224Generic() *state { + return &state{rate: 144, outputLen: 28, dsbyte: 0x06} +} + +func new256Generic() *state { + return &state{rate: 136, outputLen: 32, dsbyte: 0x06} +} + +func new384Generic() *state { + return &state{rate: 104, outputLen: 48, dsbyte: 0x06} +} + +func new512Generic() *state { return &state{rate: 72, outputLen: 64, dsbyte: 0x06} } diff --git a/vendor/golang.org/x/crypto/sha3/hashes_generic.go b/vendor/golang.org/x/crypto/sha3/hashes_generic.go deleted file mode 100644 index fe8c84793c0..00000000000 --- a/vendor/golang.org/x/crypto/sha3/hashes_generic.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !gc || purego || !s390x - -package sha3 - -import ( - "hash" -) - -// new224Asm returns an assembly implementation of SHA3-224 if available, -// otherwise it returns nil. -func new224Asm() hash.Hash { return nil } - -// new256Asm returns an assembly implementation of SHA3-256 if available, -// otherwise it returns nil. -func new256Asm() hash.Hash { return nil } - -// new384Asm returns an assembly implementation of SHA3-384 if available, -// otherwise it returns nil. -func new384Asm() hash.Hash { return nil } - -// new512Asm returns an assembly implementation of SHA3-512 if available, -// otherwise it returns nil. -func new512Asm() hash.Hash { return nil } diff --git a/vendor/golang.org/x/crypto/sha3/hashes_noasm.go b/vendor/golang.org/x/crypto/sha3/hashes_noasm.go new file mode 100644 index 00000000000..9d85fb62144 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/hashes_noasm.go @@ -0,0 +1,23 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !gc || purego || !s390x + +package sha3 + +func new224() *state { + return new224Generic() +} + +func new256() *state { + return new256Generic() +} + +func new384() *state { + return new384Generic() +} + +func new512() *state { + return new512Generic() +} diff --git a/vendor/golang.org/x/crypto/sha3/register.go b/vendor/golang.org/x/crypto/sha3/register.go deleted file mode 100644 index addfd5049bb..00000000000 --- a/vendor/golang.org/x/crypto/sha3/register.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.4 - -package sha3 - -import ( - "crypto" -) - -func init() { - crypto.RegisterHash(crypto.SHA3_224, New224) - crypto.RegisterHash(crypto.SHA3_256, New256) - crypto.RegisterHash(crypto.SHA3_384, New384) - crypto.RegisterHash(crypto.SHA3_512, New512) -} diff --git a/vendor/golang.org/x/crypto/sha3/sha3.go b/vendor/golang.org/x/crypto/sha3/sha3.go index 4884d172a49..afedde5abf1 100644 --- a/vendor/golang.org/x/crypto/sha3/sha3.go +++ b/vendor/golang.org/x/crypto/sha3/sha3.go @@ -23,7 +23,6 @@ const ( type state struct { // Generic sponge components. 
a [25]uint64 // main state of the hash - buf []byte // points into storage rate int // the number of bytes of state to use // dsbyte contains the "domain separation" bits and the first bit of @@ -40,7 +39,8 @@ type state struct { // Extendable-Output Functions (May 2014)" dsbyte byte - storage storageBuf + i, n int // storage[i:n] is the buffer, i is only used while squeezing + storage [maxRate]byte // Specific to SHA-3 and SHAKE. outputLen int // the default output size in bytes @@ -54,24 +54,18 @@ func (d *state) BlockSize() int { return d.rate } func (d *state) Size() int { return d.outputLen } // Reset clears the internal state by zeroing the sponge state and -// the byte buffer, and setting Sponge.state to absorbing. +// the buffer indexes, and setting Sponge.state to absorbing. func (d *state) Reset() { // Zero the permutation's state. for i := range d.a { d.a[i] = 0 } d.state = spongeAbsorbing - d.buf = d.storage.asBytes()[:0] + d.i, d.n = 0, 0 } func (d *state) clone() *state { ret := *d - if ret.state == spongeAbsorbing { - ret.buf = ret.storage.asBytes()[:len(ret.buf)] - } else { - ret.buf = ret.storage.asBytes()[d.rate-cap(d.buf) : d.rate] - } - return &ret } @@ -82,43 +76,40 @@ func (d *state) permute() { case spongeAbsorbing: // If we're absorbing, we need to xor the input into the state // before applying the permutation. - xorIn(d, d.buf) - d.buf = d.storage.asBytes()[:0] + xorIn(d, d.storage[:d.rate]) + d.n = 0 keccakF1600(&d.a) case spongeSqueezing: // If we're squeezing, we need to apply the permutation before // copying more output. keccakF1600(&d.a) - d.buf = d.storage.asBytes()[:d.rate] - copyOut(d, d.buf) + d.i = 0 + copyOut(d, d.storage[:d.rate]) } } // pads appends the domain separation bits in dsbyte, applies // the multi-bitrate 10..1 padding rule, and permutes the state. -func (d *state) padAndPermute(dsbyte byte) { - if d.buf == nil { - d.buf = d.storage.asBytes()[:0] - } +func (d *state) padAndPermute() { // Pad with this instance's domain-separator bits. We know that there's // at least one byte of space in d.buf because, if it were full, // permute would have been called to empty it. dsbyte also contains the // first one bit for the padding. See the comment in the state struct. - d.buf = append(d.buf, dsbyte) - zerosStart := len(d.buf) - d.buf = d.storage.asBytes()[:d.rate] - for i := zerosStart; i < d.rate; i++ { - d.buf[i] = 0 + d.storage[d.n] = d.dsbyte + d.n++ + for d.n < d.rate { + d.storage[d.n] = 0 + d.n++ } // This adds the final one bit for the padding. Because of the way that // bits are numbered from the LSB upwards, the final bit is the MSB of // the last byte. - d.buf[d.rate-1] ^= 0x80 + d.storage[d.rate-1] ^= 0x80 // Apply the permutation d.permute() d.state = spongeSqueezing - d.buf = d.storage.asBytes()[:d.rate] - copyOut(d, d.buf) + d.n = d.rate + copyOut(d, d.storage[:d.rate]) } // Write absorbs more data into the hash's state. It panics if any @@ -127,28 +118,25 @@ func (d *state) Write(p []byte) (written int, err error) { if d.state != spongeAbsorbing { panic("sha3: Write after Read") } - if d.buf == nil { - d.buf = d.storage.asBytes()[:0] - } written = len(p) for len(p) > 0 { - if len(d.buf) == 0 && len(p) >= d.rate { + if d.n == 0 && len(p) >= d.rate { // The fast path; absorb a full "rate" bytes of input and apply the permutation. xorIn(d, p[:d.rate]) p = p[d.rate:] keccakF1600(&d.a) } else { // The slow path; buffer the input until we can fill the sponge, and then xor it in. 
- todo := d.rate - len(d.buf) + todo := d.rate - d.n if todo > len(p) { todo = len(p) } - d.buf = append(d.buf, p[:todo]...) + d.n += copy(d.storage[d.n:], p[:todo]) p = p[todo:] // If the sponge is full, apply the permutation. - if len(d.buf) == d.rate { + if d.n == d.rate { d.permute() } } @@ -161,19 +149,19 @@ func (d *state) Write(p []byte) (written int, err error) { func (d *state) Read(out []byte) (n int, err error) { // If we're still absorbing, pad and apply the permutation. if d.state == spongeAbsorbing { - d.padAndPermute(d.dsbyte) + d.padAndPermute() } n = len(out) // Now, do the squeezing. for len(out) > 0 { - n := copy(out, d.buf) - d.buf = d.buf[n:] + n := copy(out, d.storage[d.i:d.n]) + d.i += n out = out[n:] // Apply the permutation if we've squeezed the sponge dry. - if len(d.buf) == 0 { + if d.i == d.rate { d.permute() } } diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go index d861bca5286..00d8034ae62 100644 --- a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go +++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go @@ -143,6 +143,12 @@ func (s *asmState) Write(b []byte) (int, error) { // Read squeezes an arbitrary number of bytes from the sponge. func (s *asmState) Read(out []byte) (n int, err error) { + // The 'compute last message digest' instruction only stores the digest + // at the first operand (dst) for SHAKE functions. + if s.function != shake_128 && s.function != shake_256 { + panic("sha3: can only call Read for SHAKE functions") + } + n = len(out) // need to pad if we were absorbing @@ -202,8 +208,17 @@ func (s *asmState) Sum(b []byte) []byte { // Hash the buffer. Note that we don't clear it because we // aren't updating the state. - klmd(s.function, &a, nil, s.buf) - return append(b, a[:s.outputLen]...) + switch s.function { + case sha3_224, sha3_256, sha3_384, sha3_512: + klmd(s.function, &a, nil, s.buf) + return append(b, a[:s.outputLen]...) + case shake_128, shake_256: + d := make([]byte, s.outputLen, 64) + klmd(s.function, &a, d, s.buf) + return append(b, d[:s.outputLen]...) + default: + panic("sha3: unknown function") + } } // Reset resets the Hash to its initial state. @@ -233,56 +248,56 @@ func (s *asmState) Clone() ShakeHash { return s.clone() } -// new224Asm returns an assembly implementation of SHA3-224 if available, -// otherwise it returns nil. -func new224Asm() hash.Hash { +// new224 returns an assembly implementation of SHA3-224 if available, +// otherwise it returns a generic implementation. +func new224() hash.Hash { if cpu.S390X.HasSHA3 { return newAsmState(sha3_224) } - return nil + return new224Generic() } -// new256Asm returns an assembly implementation of SHA3-256 if available, -// otherwise it returns nil. -func new256Asm() hash.Hash { +// new256 returns an assembly implementation of SHA3-256 if available, +// otherwise it returns a generic implementation. +func new256() hash.Hash { if cpu.S390X.HasSHA3 { return newAsmState(sha3_256) } - return nil + return new256Generic() } -// new384Asm returns an assembly implementation of SHA3-384 if available, -// otherwise it returns nil. -func new384Asm() hash.Hash { +// new384 returns an assembly implementation of SHA3-384 if available, +// otherwise it returns a generic implementation. +func new384() hash.Hash { if cpu.S390X.HasSHA3 { return newAsmState(sha3_384) } - return nil + return new384Generic() } -// new512Asm returns an assembly implementation of SHA3-512 if available, -// otherwise it returns nil. 
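The rewritten sponge above replaces the buf slice with i/n indexes into storage, and the s390x Read now panics for the fixed-output SHA-3 functions, matching the generic code where only the SHAKE XOFs expose arbitrary-length squeezing. A short, hedged sketch of the two public entry points:

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	msg := []byte("extendable output")

	// SHAKE256 is an XOF: Write absorbs, Read squeezes as many bytes as requested.
	x := sha3.NewShake256()
	x.Write(msg)
	out := make([]byte, 100) // any output length is valid
	x.Read(out)
	fmt.Printf("shake256 first 8 bytes: %x\n", out[:8])

	// The fixed-output hashes are used through hash.Hash: Write then Sum, never Read.
	h := sha3.New512()
	h.Write(msg)
	fmt.Printf("sha3-512 first 8 bytes: %x\n", h.Sum(nil)[:8])
}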
-func new512Asm() hash.Hash { +// new512 returns an assembly implementation of SHA3-512 if available, +// otherwise it returns a generic implementation. +func new512() hash.Hash { if cpu.S390X.HasSHA3 { return newAsmState(sha3_512) } - return nil + return new512Generic() } -// newShake128Asm returns an assembly implementation of SHAKE-128 if available, -// otherwise it returns nil. -func newShake128Asm() ShakeHash { +// newShake128 returns an assembly implementation of SHAKE-128 if available, +// otherwise it returns a generic implementation. +func newShake128() ShakeHash { if cpu.S390X.HasSHA3 { return newAsmState(shake_128) } - return nil + return newShake128Generic() } -// newShake256Asm returns an assembly implementation of SHAKE-256 if available, -// otherwise it returns nil. -func newShake256Asm() ShakeHash { +// newShake256 returns an assembly implementation of SHAKE-256 if available, +// otherwise it returns a generic implementation. +func newShake256() ShakeHash { if cpu.S390X.HasSHA3 { return newAsmState(shake_256) } - return nil + return newShake256Generic() } diff --git a/vendor/golang.org/x/crypto/sha3/shake.go b/vendor/golang.org/x/crypto/sha3/shake.go index bb69984027f..1ea9275b8b7 100644 --- a/vendor/golang.org/x/crypto/sha3/shake.go +++ b/vendor/golang.org/x/crypto/sha3/shake.go @@ -115,19 +115,21 @@ func (c *state) Clone() ShakeHash { // Its generic security strength is 128 bits against all attacks if at // least 32 bytes of its output are used. func NewShake128() ShakeHash { - if h := newShake128Asm(); h != nil { - return h - } - return &state{rate: rate128, outputLen: 32, dsbyte: dsbyteShake} + return newShake128() } // NewShake256 creates a new SHAKE256 variable-output-length ShakeHash. // Its generic security strength is 256 bits against all attacks if // at least 64 bytes of its output are used. func NewShake256() ShakeHash { - if h := newShake256Asm(); h != nil { - return h - } + return newShake256() +} + +func newShake128Generic() *state { + return &state{rate: rate128, outputLen: 32, dsbyte: dsbyteShake} +} + +func newShake256Generic() *state { return &state{rate: rate256, outputLen: 64, dsbyte: dsbyteShake} } diff --git a/vendor/golang.org/x/crypto/sha3/shake_generic.go b/vendor/golang.org/x/crypto/sha3/shake_generic.go deleted file mode 100644 index 8d31cf5be2d..00000000000 --- a/vendor/golang.org/x/crypto/sha3/shake_generic.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !gc || purego || !s390x - -package sha3 - -// newShake128Asm returns an assembly implementation of SHAKE-128 if available, -// otherwise it returns nil. -func newShake128Asm() ShakeHash { - return nil -} - -// newShake256Asm returns an assembly implementation of SHAKE-256 if available, -// otherwise it returns nil. -func newShake256Asm() ShakeHash { - return nil -} diff --git a/vendor/golang.org/x/crypto/sha3/shake_noasm.go b/vendor/golang.org/x/crypto/sha3/shake_noasm.go new file mode 100644 index 00000000000..4276ba4ab2c --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/shake_noasm.go @@ -0,0 +1,15 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !gc || purego || !s390x + +package sha3 + +func newShake128() *state { + return newShake128Generic() +} + +func newShake256() *state { + return newShake256Generic() +} diff --git a/vendor/golang.org/x/crypto/sha3/xor.go b/vendor/golang.org/x/crypto/sha3/xor.go index 7337cca88ed..6ada5c9574e 100644 --- a/vendor/golang.org/x/crypto/sha3/xor.go +++ b/vendor/golang.org/x/crypto/sha3/xor.go @@ -2,22 +2,39 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (!amd64 && !386 && !ppc64le) || purego - package sha3 -// A storageBuf is an aligned array of maxRate bytes. -type storageBuf [maxRate]byte - -func (b *storageBuf) asBytes() *[maxRate]byte { - return (*[maxRate]byte)(b) -} +import ( + "crypto/subtle" + "encoding/binary" + "unsafe" -var ( - xorIn = xorInGeneric - copyOut = copyOutGeneric - xorInUnaligned = xorInGeneric - copyOutUnaligned = copyOutGeneric + "golang.org/x/sys/cpu" ) -const xorImplementationUnaligned = "generic" +// xorIn xors the bytes in buf into the state. +func xorIn(d *state, buf []byte) { + if cpu.IsBigEndian { + for i := 0; len(buf) >= 8; i++ { + a := binary.LittleEndian.Uint64(buf) + d.a[i] ^= a + buf = buf[8:] + } + } else { + ab := (*[25 * 64 / 8]byte)(unsafe.Pointer(&d.a)) + subtle.XORBytes(ab[:], ab[:], buf) + } +} + +// copyOut copies uint64s to a byte buffer. +func copyOut(d *state, b []byte) { + if cpu.IsBigEndian { + for i := 0; len(b) >= 8; i++ { + binary.LittleEndian.PutUint64(b, d.a[i]) + b = b[8:] + } + } else { + ab := (*[25 * 64 / 8]byte)(unsafe.Pointer(&d.a)) + copy(b, ab[:]) + } +} diff --git a/vendor/golang.org/x/crypto/sha3/xor_generic.go b/vendor/golang.org/x/crypto/sha3/xor_generic.go deleted file mode 100644 index 8d947711272..00000000000 --- a/vendor/golang.org/x/crypto/sha3/xor_generic.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package sha3 - -import "encoding/binary" - -// xorInGeneric xors the bytes in buf into the state; it -// makes no non-portable assumptions about memory layout -// or alignment. -func xorInGeneric(d *state, buf []byte) { - n := len(buf) / 8 - - for i := 0; i < n; i++ { - a := binary.LittleEndian.Uint64(buf) - d.a[i] ^= a - buf = buf[8:] - } -} - -// copyOutGeneric copies uint64s to a byte buffer. -func copyOutGeneric(d *state, b []byte) { - for i := 0; len(b) >= 8; i++ { - binary.LittleEndian.PutUint64(b, d.a[i]) - b = b[8:] - } -} diff --git a/vendor/golang.org/x/crypto/sha3/xor_unaligned.go b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go deleted file mode 100644 index 870e2d16e07..00000000000 --- a/vendor/golang.org/x/crypto/sha3/xor_unaligned.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (amd64 || 386 || ppc64le) && !purego - -package sha3 - -import "unsafe" - -// A storageBuf is an aligned array of maxRate bytes. -type storageBuf [maxRate / 8]uint64 - -func (b *storageBuf) asBytes() *[maxRate]byte { - return (*[maxRate]byte)(unsafe.Pointer(b)) -} - -// xorInUnaligned uses unaligned reads and writes to update d.a to contain d.a -// XOR buf. 
-func xorInUnaligned(d *state, buf []byte) { - n := len(buf) - bw := (*[maxRate / 8]uint64)(unsafe.Pointer(&buf[0]))[: n/8 : n/8] - if n >= 72 { - d.a[0] ^= bw[0] - d.a[1] ^= bw[1] - d.a[2] ^= bw[2] - d.a[3] ^= bw[3] - d.a[4] ^= bw[4] - d.a[5] ^= bw[5] - d.a[6] ^= bw[6] - d.a[7] ^= bw[7] - d.a[8] ^= bw[8] - } - if n >= 104 { - d.a[9] ^= bw[9] - d.a[10] ^= bw[10] - d.a[11] ^= bw[11] - d.a[12] ^= bw[12] - } - if n >= 136 { - d.a[13] ^= bw[13] - d.a[14] ^= bw[14] - d.a[15] ^= bw[15] - d.a[16] ^= bw[16] - } - if n >= 144 { - d.a[17] ^= bw[17] - } - if n >= 168 { - d.a[18] ^= bw[18] - d.a[19] ^= bw[19] - d.a[20] ^= bw[20] - } -} - -func copyOutUnaligned(d *state, buf []byte) { - ab := (*[maxRate]uint8)(unsafe.Pointer(&d.a[0])) - copy(buf, ab[:]) -} - -var ( - xorIn = xorInUnaligned - copyOut = copyOutUnaligned -) - -const xorImplementationUnaligned = "unaligned" diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE index 6a66aea5eaf..2a7cf70da6e 100644 --- a/vendor/golang.org/x/exp/LICENSE +++ b/vendor/golang.org/x/exp/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go index b67897f76b5..f58bbc7ba4d 100644 --- a/vendor/golang.org/x/exp/slices/sort.go +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -22,10 +22,12 @@ func Sort[S ~[]E, E constraints.Ordered](x S) { // SortFunc sorts the slice x in ascending order as determined by the cmp // function. This sort is not guaranteed to be stable. // cmp(a, b) should return a negative number when a < b, a positive number when -// a > b and zero when a == b. +// a > b and zero when a == b or when a is not comparable to b in the sense +// of the formal definition of Strict Weak Ordering. // // SortFunc requires that cmp is a strict weak ordering. // See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. +// To indicate 'uncomparable', return 0 from the function. func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { n := len(x) pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp) diff --git a/vendor/golang.org/x/mod/modfile/rule.go b/vendor/golang.org/x/mod/modfile/rule.go index 0e7b7e26792..66dcaf9803d 100644 --- a/vendor/golang.org/x/mod/modfile/rule.go +++ b/vendor/golang.org/x/mod/modfile/rule.go @@ -38,6 +38,7 @@ type File struct { Module *Module Go *Go Toolchain *Toolchain + Godebug []*Godebug Require []*Require Exclude []*Exclude Replace []*Replace @@ -65,6 +66,13 @@ type Toolchain struct { Syntax *Line } +// A Godebug is a single godebug key=value statement. +type Godebug struct { + Key string + Value string + Syntax *Line +} + // An Exclude is a single exclude statement. 
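The slices.SortFunc documentation change above tightens the comparator contract: cmp must describe a strict weak ordering and return 0 for equal (or incomparable) elements. A small sketch of a conforming comparator, assuming a Go 1.21+ toolchain for the standard cmp package:

package main

import (
	"cmp"
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	words := []string{"keda", "go", "scaler", "nats"}

	// Sort by length, then lexicographically; ties within the same key return 0.
	slices.SortFunc(words, func(a, b string) int {
		if d := cmp.Compare(len(a), len(b)); d != 0 {
			return d
		}
		return cmp.Compare(a, b)
	})
	fmt.Println(words) // [go keda nats scaler]
}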
type Exclude struct { Mod module.Version @@ -289,7 +297,7 @@ func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (parse }) } continue - case "module", "require", "exclude", "replace", "retract": + case "module", "godebug", "require", "exclude", "replace", "retract": for _, l := range x.Line { f.add(&errs, x, l, x.Token[0], l.Token, fix, strict) } @@ -308,7 +316,9 @@ var laxGoVersionRE = lazyregexp.New(`^v?(([1-9][0-9]*)\.(0|[1-9][0-9]*))([^0-9]. // Toolchains must be named beginning with `go1`, // like "go1.20.3" or "go1.20.3-gccgo". As a special case, "default" is also permitted. -// TODO(samthanawalla): Replace regex with https://pkg.go.dev/go/version#IsValid in 1.23+ +// Note that this regexp is a much looser condition than go/version.IsValid, +// for forward compatibility. +// (This code has to be work to identify new toolchains even if we tweak the syntax in the future.) var ToolchainRE = lazyregexp.New(`^default$|^go1($|\.)`) func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, args []string, fix VersionFixer, strict bool) { @@ -384,7 +394,7 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a if len(args) != 1 { errorf("toolchain directive expects exactly one argument") return - } else if strict && !ToolchainRE.MatchString(args[0]) { + } else if !ToolchainRE.MatchString(args[0]) { errorf("invalid toolchain version '%s': must match format go1.23.0 or default", args[0]) return } @@ -412,6 +422,22 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a } f.Module.Mod = module.Version{Path: s} + case "godebug": + if len(args) != 1 || strings.ContainsAny(args[0], "\"`',") { + errorf("usage: godebug key=value") + return + } + key, value, ok := strings.Cut(args[0], "=") + if !ok { + errorf("usage: godebug key=value") + return + } + f.Godebug = append(f.Godebug, &Godebug{ + Key: key, + Value: value, + Syntax: line, + }) + case "require", "exclude": if len(args) != 2 { errorf("usage: %s module/path v1.2.3", verb) @@ -654,6 +680,22 @@ func (f *WorkFile) add(errs *ErrorList, line *Line, verb string, args []string, f.Toolchain = &Toolchain{Syntax: line} f.Toolchain.Name = args[0] + case "godebug": + if len(args) != 1 || strings.ContainsAny(args[0], "\"`',") { + errorf("usage: godebug key=value") + return + } + key, value, ok := strings.Cut(args[0], "=") + if !ok { + errorf("usage: godebug key=value") + return + } + f.Godebug = append(f.Godebug, &Godebug{ + Key: key, + Value: value, + Syntax: line, + }) + case "use": if len(args) != 1 { errorf("usage: %s local/dir", verb) @@ -929,6 +971,15 @@ func (f *File) Format() ([]byte, error) { // Cleanup cleans out all the cleared entries. func (f *File) Cleanup() { w := 0 + for _, g := range f.Godebug { + if g.Key != "" { + f.Godebug[w] = g + w++ + } + } + f.Godebug = f.Godebug[:w] + + w = 0 for _, r := range f.Require { if r.Mod.Path != "" { f.Require[w] = r @@ -1027,6 +1078,45 @@ func (f *File) AddToolchainStmt(name string) error { return nil } +// AddGodebug sets the first godebug line for key to value, +// preserving any existing comments for that line and removing all +// other godebug lines for key. +// +// If no line currently exists for key, AddGodebug adds a new line +// at the end of the last godebug block. 
+func (f *File) AddGodebug(key, value string) error { + need := true + for _, g := range f.Godebug { + if g.Key == key { + if need { + g.Value = value + f.Syntax.updateLine(g.Syntax, "godebug", key+"="+value) + need = false + } else { + g.Syntax.markRemoved() + *g = Godebug{} + } + } + } + + if need { + f.addNewGodebug(key, value) + } + return nil +} + +// addNewGodebug adds a new godebug key=value line at the end +// of the last godebug block, regardless of any existing godebug lines for key. +func (f *File) addNewGodebug(key, value string) { + line := f.Syntax.addLine(nil, "godebug", key+"="+value) + g := &Godebug{ + Key: key, + Value: value, + Syntax: line, + } + f.Godebug = append(f.Godebug, g) +} + // AddRequire sets the first require line for path to version vers, // preserving any existing comments for that line and removing all // other lines for path. @@ -1334,6 +1424,16 @@ func (f *File) SetRequireSeparateIndirect(req []*Require) { f.SortBlocks() } +func (f *File) DropGodebug(key string) error { + for _, g := range f.Godebug { + if g.Key == key { + g.Syntax.markRemoved() + *g = Godebug{} + } + } + return nil +} + func (f *File) DropRequire(path string) error { for _, r := range f.Require { if r.Mod.Path == path { diff --git a/vendor/golang.org/x/mod/modfile/work.go b/vendor/golang.org/x/mod/modfile/work.go index d7b99376ebe..8f54897cf7b 100644 --- a/vendor/golang.org/x/mod/modfile/work.go +++ b/vendor/golang.org/x/mod/modfile/work.go @@ -14,6 +14,7 @@ import ( type WorkFile struct { Go *Go Toolchain *Toolchain + Godebug []*Godebug Use []*Use Replace []*Replace @@ -68,7 +69,7 @@ func ParseWork(file string, data []byte, fix VersionFixer) (*WorkFile, error) { Err: fmt.Errorf("unknown block type: %s", strings.Join(x.Token, " ")), }) continue - case "use", "replace": + case "godebug", "use", "replace": for _, l := range x.Line { f.add(&errs, l, x.Token[0], l.Token, fix) } @@ -184,6 +185,55 @@ func (f *WorkFile) DropToolchainStmt() { } } +// AddGodebug sets the first godebug line for key to value, +// preserving any existing comments for that line and removing all +// other godebug lines for key. +// +// If no line currently exists for key, AddGodebug adds a new line +// at the end of the last godebug block. +func (f *WorkFile) AddGodebug(key, value string) error { + need := true + for _, g := range f.Godebug { + if g.Key == key { + if need { + g.Value = value + f.Syntax.updateLine(g.Syntax, "godebug", key+"="+value) + need = false + } else { + g.Syntax.markRemoved() + *g = Godebug{} + } + } + } + + if need { + f.addNewGodebug(key, value) + } + return nil +} + +// addNewGodebug adds a new godebug key=value line at the end +// of the last godebug block, regardless of any existing godebug lines for key. 
+func (f *WorkFile) addNewGodebug(key, value string) { + line := f.Syntax.addLine(nil, "godebug", key+"="+value) + g := &Godebug{ + Key: key, + Value: value, + Syntax: line, + } + f.Godebug = append(f.Godebug, g) +} + +func (f *WorkFile) DropGodebug(key string) error { + for _, g := range f.Godebug { + if g.Key == key { + g.Syntax.markRemoved() + *g = Godebug{} + } + } + return nil +} + func (f *WorkFile) AddUse(diskPath, modulePath string) error { need := true for _, d := range f.Use { diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go index 2466ae3d9a5..3a7e5ab1765 100644 --- a/vendor/golang.org/x/net/html/doc.go +++ b/vendor/golang.org/x/net/html/doc.go @@ -104,7 +104,7 @@ tokenization, and tokenization and tree construction stages of the WHATWG HTML parsing specification respectively. While the tokenizer parses and normalizes individual HTML tokens, only the parser constructs the DOM tree from the tokenized HTML, as described in the tree construction stage of the -specification, dynamically modifying or extending the docuemnt's DOM tree. +specification, dynamically modifying or extending the document's DOM tree. If your use case requires semantically well-formed HTML documents, as defined by the WHATWG specification, the parser should be used rather than the tokenizer. diff --git a/vendor/golang.org/x/net/http/httpguts/httplex.go b/vendor/golang.org/x/net/http/httpguts/httplex.go index 6e071e85243..9b4de94019b 100644 --- a/vendor/golang.org/x/net/http/httpguts/httplex.go +++ b/vendor/golang.org/x/net/http/httpguts/httplex.go @@ -12,7 +12,7 @@ import ( "golang.org/x/net/idna" ) -var isTokenTable = [127]bool{ +var isTokenTable = [256]bool{ '!': true, '#': true, '$': true, @@ -93,12 +93,7 @@ var isTokenTable = [127]bool{ } func IsTokenRune(r rune) bool { - i := int(r) - return i < len(isTokenTable) && isTokenTable[i] -} - -func isNotToken(r rune) bool { - return !IsTokenRune(r) + return r < utf8.RuneSelf && isTokenTable[byte(r)] } // HeaderValuesContainsToken reports whether any string in values @@ -202,8 +197,8 @@ func ValidHeaderFieldName(v string) bool { if len(v) == 0 { return false } - for _, r := range v { - if !IsTokenRune(r) { + for i := 0; i < len(v); i++ { + if !isTokenTable[v[i]] { return false } } diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 43557ab7e97..105c3b279c0 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -490,6 +490,9 @@ func terminalReadFrameError(err error) bool { // returned error is ErrFrameTooLarge. Other errors may be of type // ConnectionError, StreamError, or anything else from the underlying // reader. +// +// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID +// indicates the stream responsible for the error. func (fr *Framer) ReadFrame() (Frame, error) { fr.errDetail = nil if fr.lastFrame != nil { @@ -1521,7 +1524,7 @@ func (fr *Framer) maxHeaderStringLen() int { // readMetaFrame returns 0 or more CONTINUATION frames from fr and // merge them into the provided hf and returns a MetaHeadersFrame // with the decoded hpack values. 
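The x/mod/modfile changes above add first-class godebug support: a Godebug struct, parsing of the directive in go.mod and go.work, and AddGodebug/DropGodebug on both File and WorkFile. A rough sketch of the go.mod side, assuming this vendored module version; the key names are illustrative GODEBUG settings, not anything KEDA requires:

package main

import (
	"fmt"
	"log"

	"golang.org/x/mod/modfile"
)

func main() {
	src := []byte("module example.com/demo\n\ngo 1.22\n\ngodebug httplaxcontentlength=1\n")

	f, err := modfile.Parse("go.mod", src, nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, g := range f.Godebug {
		fmt.Printf("parsed godebug %s=%s\n", g.Key, g.Value)
	}

	// Add (or update) a godebug line, then re-render the file.
	if err := f.AddGodebug("gotypesalias", "0"); err != nil {
		log.Fatal(err)
	}
	out, err := f.Format()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(out))
}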
-func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { +func (fr *Framer) readMetaFrame(hf *HeadersFrame) (Frame, error) { if fr.AllowIllegalReads { return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders") } @@ -1592,7 +1595,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { } // It would be nice to send a RST_STREAM before sending the GOAWAY, // but the structure of the server's frame writer makes this difficult. - return nil, ConnectionError(ErrCodeProtocol) + return mh, ConnectionError(ErrCodeProtocol) } // Also close the connection after any CONTINUATION frame following an @@ -1604,11 +1607,11 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { } // It would be nice to send a RST_STREAM before sending the GOAWAY, // but the structure of the server's frame writer makes this difficult. - return nil, ConnectionError(ErrCodeProtocol) + return mh, ConnectionError(ErrCodeProtocol) } if _, err := hdec.Write(frag); err != nil { - return nil, ConnectionError(ErrCodeCompression) + return mh, ConnectionError(ErrCodeCompression) } if hc.HeadersEnded() { @@ -1625,7 +1628,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { mh.HeadersFrame.invalidate() if err := hdec.Close(); err != nil { - return nil, ConnectionError(ErrCodeCompression) + return mh, ConnectionError(ErrCodeCompression) } if invalid != nil { fr.errDetail = invalid diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 6f2df281872..003e649f30c 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -17,6 +17,7 @@ package http2 // import "golang.org/x/net/http2" import ( "bufio" + "context" "crypto/tls" "fmt" "io" @@ -26,6 +27,7 @@ import ( "strconv" "strings" "sync" + "time" "golang.org/x/net/http/httpguts" ) @@ -210,12 +212,6 @@ type stringWriter interface { WriteString(s string) (n int, err error) } -// A gate lets two goroutines coordinate their activities. -type gate chan struct{} - -func (g gate) Done() { g <- struct{}{} } -func (g gate) Wait() { <-g } - // A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed). type closeWaiter chan struct{} @@ -383,3 +379,14 @@ func validPseudoPath(v string) bool { // makes that struct also non-comparable, and generally doesn't add // any size (as long as it's first). type incomparable [0]func() + +// synctestGroupInterface is the methods of synctestGroup used by Server and Transport. +// It's defined as an interface here to let us keep synctestGroup entirely test-only +// and not a part of non-test builds. +type synctestGroupInterface interface { + Join() + Now() time.Time + NewTimer(d time.Duration) timer + AfterFunc(d time.Duration, f func()) timer + ContextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) +} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index ce2e8b40eee..6c349f3ec64 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -154,6 +154,39 @@ type Server struct { // so that we don't embed a Mutex in this struct, which will make the // struct non-copyable, which might break some callers. state *serverInternalState + + // Synchronization group used for testing. + // Outside of tests, this is nil. 
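Related to the httpguts change earlier in this hunk: isTokenTable now covers all 256 byte values and IsTokenRune/ValidHeaderFieldName index it directly, rejecting anything outside the ASCII token characters. A quick illustration via the exported helpers; the results noted in comments are what the token table implies:

package main

import (
	"fmt"

	"golang.org/x/net/http/httpguts"
)

func main() {
	for _, name := range []string{"Content-Type", "X-Request-ID", "Bad Header", "résumé"} {
		fmt.Printf("%-14q valid field name: %v\n", name, httpguts.ValidHeaderFieldName(name))
	}
	// true false false: letters are token runes, spaces and non-ASCII runes are not.
	fmt.Println(httpguts.IsTokenRune('a'), httpguts.IsTokenRune(' '), httpguts.IsTokenRune('é'))
}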
+ group synctestGroupInterface +} + +func (s *Server) markNewGoroutine() { + if s.group != nil { + s.group.Join() + } +} + +func (s *Server) now() time.Time { + if s.group != nil { + return s.group.Now() + } + return time.Now() +} + +// newTimer creates a new time.Timer, or a synthetic timer in tests. +func (s *Server) newTimer(d time.Duration) timer { + if s.group != nil { + return s.group.NewTimer(d) + } + return timeTimer{time.NewTimer(d)} +} + +// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. +func (s *Server) afterFunc(d time.Duration, f func()) timer { + if s.group != nil { + return s.group.AfterFunc(d, f) + } + return timeTimer{time.AfterFunc(d, f)} } func (s *Server) initialConnRecvWindowSize() int32 { @@ -400,6 +433,10 @@ func (o *ServeConnOpts) handler() http.Handler { // // The opts parameter is optional. If nil, default values are used. func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { + s.serveConn(c, opts, nil) +} + +func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverConn)) { baseCtx, cancel := serverConnBaseContext(c, opts) defer cancel() @@ -426,6 +463,9 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { pushEnabled: true, sawClientPreface: opts.SawClientPreface, } + if newf != nil { + newf(sc) + } s.state.registerConn(sc) defer s.state.unregisterConn(sc) @@ -599,8 +639,8 @@ type serverConn struct { inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop needToSendGoAway bool // we need to schedule a GOAWAY frame write goAwayCode ErrCode - shutdownTimer *time.Timer // nil until used - idleTimer *time.Timer // nil if unused + shutdownTimer timer // nil until used + idleTimer timer // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer @@ -649,12 +689,12 @@ type stream struct { flow outflow // limits writing from Handler to client inflow inflow // what the client is allowed to POST/etc to us state streamState - resetQueued bool // RST_STREAM queued for write; set by sc.resetStream - gotTrailerHeader bool // HEADER frame for trailers was seen - wroteHeaders bool // whether we wrote headers (not status 100) - readDeadline *time.Timer // nil if unused - writeDeadline *time.Timer // nil if unused - closeErr error // set before cw is closed + resetQueued bool // RST_STREAM queued for write; set by sc.resetStream + gotTrailerHeader bool // HEADER frame for trailers was seen + wroteHeaders bool // whether we wrote headers (not status 100) + readDeadline timer // nil if unused + writeDeadline timer // nil if unused + closeErr error // set before cw is closed trailer http.Header // accumulated trailers reqTrailer http.Header // handler's Request.Trailer @@ -732,11 +772,7 @@ func isClosedConnError(err error) bool { return false } - // TODO: remove this string search and be more like the Windows - // case below. That might involve modifying the standard library - // to return better error types. - str := err.Error() - if strings.Contains(str, "use of closed network connection") { + if errors.Is(err, net.ErrClosed) { return true } @@ -815,8 +851,9 @@ type readFrameResult struct { // consumer is done with the frame. // It's run on its own goroutine. 
func (sc *serverConn) readFrames() { - gate := make(gate) - gateDone := gate.Done + sc.srv.markNewGoroutine() + gate := make(chan struct{}) + gateDone := func() { gate <- struct{}{} } for { f, err := sc.framer.ReadFrame() select { @@ -847,6 +884,7 @@ type frameWriteResult struct { // At most one goroutine can be running writeFrameAsync at a time per // serverConn. func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) { + sc.srv.markNewGoroutine() var err error if wd == nil { err = wr.write.writeFrame(sc) @@ -926,13 +964,13 @@ func (sc *serverConn) serve() { sc.setConnState(http.StateIdle) if sc.srv.IdleTimeout > 0 { - sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) + sc.idleTimer = sc.srv.afterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) defer sc.idleTimer.Stop() } go sc.readFrames() // closed by defer sc.conn.Close above - settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer) + settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer) defer settingsTimer.Stop() loopNum := 0 @@ -1061,10 +1099,10 @@ func (sc *serverConn) readPreface() error { errc <- nil } }() - timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? + timer := sc.srv.newTimer(prefaceTimeout) // TODO: configurable on *Server? defer timer.Stop() select { - case <-timer.C: + case <-timer.C(): return errPrefaceTimeout case err := <-errc: if err == nil { @@ -1429,7 +1467,7 @@ func (sc *serverConn) goAway(code ErrCode) { func (sc *serverConn) shutDownIn(d time.Duration) { sc.serveG.check() - sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer) + sc.shutdownTimer = sc.srv.afterFunc(d, sc.onShutdownTimer) } func (sc *serverConn) resetStream(se StreamError) { @@ -1482,6 +1520,11 @@ func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { sc.goAway(ErrCodeFlowControl) return true case ConnectionError: + if res.f != nil { + if id := res.f.Header().StreamID; id > sc.maxClientStreamID { + sc.maxClientStreamID = id + } + } sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) sc.goAway(ErrCode(ev)) return true // goAway will handle shutdown @@ -1638,7 +1681,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { delete(sc.streams, st.id) if len(sc.streams) == 0 { sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout > 0 { + if sc.srv.IdleTimeout > 0 && sc.idleTimer != nil { sc.idleTimer.Reset(sc.srv.IdleTimeout) } if h1ServerKeepAlivesDisabled(sc.hs) { @@ -1660,6 +1703,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { } } st.closeErr = err + st.cancelCtx() st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc sc.writeSched.CloseStream(st.id) } @@ -2020,7 +2064,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // (in Go 1.8), though. That's a more sane option anyway. 
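The server changes above, together with the per-stream deadlines just below, route every timeout through the new srv.newTimer/srv.afterFunc indirection so tests can substitute synthetic timers; in normal builds the durations still come from ordinary configuration. A hedged sketch of where those values originate, using the standard ConfigureServer entry point (the certificate paths are placeholders):

package main

import (
	"log"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{
		Addr:         ":8443",
		ReadTimeout:  10 * time.Second, // feeds st.readDeadline via sc.hs.ReadTimeout
		WriteTimeout: 10 * time.Second, // feeds st.writeDeadline via sc.hs.WriteTimeout
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte("ok over " + r.Proto))
		}),
	}
	// IdleTimeout on the http2.Server feeds sc.idleTimer in the diff above.
	if err := http2.ConfigureServer(srv, &http2.Server{IdleTimeout: 60 * time.Second}); err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}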
if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) - st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) + st.readDeadline = sc.srv.afterFunc(sc.hs.ReadTimeout, st.onReadTimeout) } return sc.scheduleHandler(id, rw, req, handler) @@ -2118,7 +2162,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.flow.add(sc.initialStreamSendWindowSize) st.inflow.init(sc.srv.initialStreamRecvWindowSize()) if sc.hs.WriteTimeout > 0 { - st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) + st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } sc.streams[id] = st @@ -2342,6 +2386,7 @@ func (sc *serverConn) handlerDone() { // Run on its own goroutine. func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { + sc.srv.markNewGoroutine() defer sc.sendServeMsg(handlerDoneMsg) didPanic := true defer func() { @@ -2638,7 +2683,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { var date string if _, ok := rws.snapHeader["Date"]; !ok { // TODO(bradfitz): be faster here, like net/http? measure. - date = time.Now().UTC().Format(http.TimeFormat) + date = rws.conn.srv.now().UTC().Format(http.TimeFormat) } for _, v := range rws.snapHeader["Trailer"] { @@ -2760,7 +2805,7 @@ func (rws *responseWriterState) promoteUndeclaredTrailers() { func (w *responseWriter) SetReadDeadline(deadline time.Time) error { st := w.rws.stream - if !deadline.IsZero() && deadline.Before(time.Now()) { + if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) { // If we're setting a deadline in the past, reset the stream immediately // so writes after SetWriteDeadline returns will fail. st.onReadTimeout() @@ -2776,9 +2821,9 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error { if deadline.IsZero() { st.readDeadline = nil } else if st.readDeadline == nil { - st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout) + st.readDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onReadTimeout) } else { - st.readDeadline.Reset(deadline.Sub(time.Now())) + st.readDeadline.Reset(deadline.Sub(sc.srv.now())) } }) return nil @@ -2786,7 +2831,7 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error { func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { st := w.rws.stream - if !deadline.IsZero() && deadline.Before(time.Now()) { + if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) { // If we're setting a deadline in the past, reset the stream immediately // so writes after SetWriteDeadline returns will fail. st.onWriteTimeout() @@ -2802,9 +2847,9 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { if deadline.IsZero() { st.writeDeadline = nil } else if st.writeDeadline == nil { - st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout) + st.writeDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onWriteTimeout) } else { - st.writeDeadline.Reset(deadline.Sub(time.Now())) + st.writeDeadline.Reset(deadline.Sub(sc.srv.now())) } }) return nil diff --git a/vendor/golang.org/x/net/http2/testsync.go b/vendor/golang.org/x/net/http2/testsync.go deleted file mode 100644 index 61075bd16d3..00000000000 --- a/vendor/golang.org/x/net/http2/testsync.go +++ /dev/null @@ -1,331 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -package http2 - -import ( - "context" - "sync" - "time" -) - -// testSyncHooks coordinates goroutines in tests. -// -// For example, a call to ClientConn.RoundTrip involves several goroutines, including: -// - the goroutine running RoundTrip; -// - the clientStream.doRequest goroutine, which writes the request; and -// - the clientStream.readLoop goroutine, which reads the response. -// -// Using testSyncHooks, a test can start a RoundTrip and identify when all these goroutines -// are blocked waiting for some condition such as reading the Request.Body or waiting for -// flow control to become available. -// -// The testSyncHooks also manage timers and synthetic time in tests. -// This permits us to, for example, start a request and cause it to time out waiting for -// response headers without resorting to time.Sleep calls. -type testSyncHooks struct { - // active/inactive act as a mutex and condition variable. - // - // - neither chan contains a value: testSyncHooks is locked. - // - active contains a value: unlocked, and at least one goroutine is not blocked - // - inactive contains a value: unlocked, and all goroutines are blocked - active chan struct{} - inactive chan struct{} - - // goroutine counts - total int // total goroutines - condwait map[*sync.Cond]int // blocked in sync.Cond.Wait - blocked []*testBlockedGoroutine // otherwise blocked - - // fake time - now time.Time - timers []*fakeTimer - - // Transport testing: Report various events. - newclientconn func(*ClientConn) - newstream func(*clientStream) -} - -// testBlockedGoroutine is a blocked goroutine. -type testBlockedGoroutine struct { - f func() bool // blocked until f returns true - ch chan struct{} // closed when unblocked -} - -func newTestSyncHooks() *testSyncHooks { - h := &testSyncHooks{ - active: make(chan struct{}, 1), - inactive: make(chan struct{}, 1), - condwait: map[*sync.Cond]int{}, - } - h.inactive <- struct{}{} - h.now = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) - return h -} - -// lock acquires the testSyncHooks mutex. -func (h *testSyncHooks) lock() { - select { - case <-h.active: - case <-h.inactive: - } -} - -// waitInactive waits for all goroutines to become inactive. -func (h *testSyncHooks) waitInactive() { - for { - <-h.inactive - if !h.unlock() { - break - } - } -} - -// unlock releases the testSyncHooks mutex. -// It reports whether any goroutines are active. -func (h *testSyncHooks) unlock() (active bool) { - // Look for a blocked goroutine which can be unblocked. - blocked := h.blocked[:0] - unblocked := false - for _, b := range h.blocked { - if !unblocked && b.f() { - unblocked = true - close(b.ch) - } else { - blocked = append(blocked, b) - } - } - h.blocked = blocked - - // Count goroutines blocked on condition variables. - condwait := 0 - for _, count := range h.condwait { - condwait += count - } - - if h.total > condwait+len(blocked) { - h.active <- struct{}{} - return true - } else { - h.inactive <- struct{}{} - return false - } -} - -// goRun starts a new goroutine. -func (h *testSyncHooks) goRun(f func()) { - h.lock() - h.total++ - h.unlock() - go func() { - defer func() { - h.lock() - h.total-- - h.unlock() - }() - f() - }() -} - -// blockUntil indicates that a goroutine is blocked waiting for some condition to become true. -// It waits until f returns true before proceeding. -// -// Example usage: -// -// h.blockUntil(func() bool { -// // Is the context done yet? 
-// select { -// case <-ctx.Done(): -// default: -// return false -// } -// return true -// }) -// // Wait for the context to become done. -// <-ctx.Done() -// -// The function f passed to blockUntil must be non-blocking and idempotent. -func (h *testSyncHooks) blockUntil(f func() bool) { - if f() { - return - } - ch := make(chan struct{}) - h.lock() - h.blocked = append(h.blocked, &testBlockedGoroutine{ - f: f, - ch: ch, - }) - h.unlock() - <-ch -} - -// broadcast is sync.Cond.Broadcast. -func (h *testSyncHooks) condBroadcast(cond *sync.Cond) { - h.lock() - delete(h.condwait, cond) - h.unlock() - cond.Broadcast() -} - -// broadcast is sync.Cond.Wait. -func (h *testSyncHooks) condWait(cond *sync.Cond) { - h.lock() - h.condwait[cond]++ - h.unlock() -} - -// newTimer creates a new fake timer. -func (h *testSyncHooks) newTimer(d time.Duration) timer { - h.lock() - defer h.unlock() - t := &fakeTimer{ - hooks: h, - when: h.now.Add(d), - c: make(chan time.Time), - } - h.timers = append(h.timers, t) - return t -} - -// afterFunc creates a new fake AfterFunc timer. -func (h *testSyncHooks) afterFunc(d time.Duration, f func()) timer { - h.lock() - defer h.unlock() - t := &fakeTimer{ - hooks: h, - when: h.now.Add(d), - f: f, - } - h.timers = append(h.timers, t) - return t -} - -func (h *testSyncHooks) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { - ctx, cancel := context.WithCancel(ctx) - t := h.afterFunc(d, cancel) - return ctx, func() { - t.Stop() - cancel() - } -} - -func (h *testSyncHooks) timeUntilEvent() time.Duration { - h.lock() - defer h.unlock() - var next time.Time - for _, t := range h.timers { - if next.IsZero() || t.when.Before(next) { - next = t.when - } - } - if d := next.Sub(h.now); d > 0 { - return d - } - return 0 -} - -// advance advances time and causes synthetic timers to fire. -func (h *testSyncHooks) advance(d time.Duration) { - h.lock() - defer h.unlock() - h.now = h.now.Add(d) - timers := h.timers[:0] - for _, t := range h.timers { - t := t // remove after go.mod depends on go1.22 - t.mu.Lock() - switch { - case t.when.After(h.now): - timers = append(timers, t) - case t.when.IsZero(): - // stopped timer - default: - t.when = time.Time{} - if t.c != nil { - close(t.c) - } - if t.f != nil { - h.total++ - go func() { - defer func() { - h.lock() - h.total-- - h.unlock() - }() - t.f() - }() - } - } - t.mu.Unlock() - } - h.timers = timers -} - -// A timer wraps a time.Timer, or a synthetic equivalent in tests. -// Unlike time.Timer, timer is single-use: The timer channel is closed when the timer expires. -type timer interface { - C() <-chan time.Time - Stop() bool - Reset(d time.Duration) bool -} - -// timeTimer implements timer using real time. -type timeTimer struct { - t *time.Timer - c chan time.Time -} - -// newTimeTimer creates a new timer using real time. -func newTimeTimer(d time.Duration) timer { - ch := make(chan time.Time) - t := time.AfterFunc(d, func() { - close(ch) - }) - return &timeTimer{t, ch} -} - -// newTimeAfterFunc creates an AfterFunc timer using real time. -func newTimeAfterFunc(d time.Duration, f func()) timer { - return &timeTimer{ - t: time.AfterFunc(d, f), - } -} - -func (t timeTimer) C() <-chan time.Time { return t.c } -func (t timeTimer) Stop() bool { return t.t.Stop() } -func (t timeTimer) Reset(d time.Duration) bool { return t.t.Reset(d) } - -// fakeTimer implements timer using fake time. 
-type fakeTimer struct { - hooks *testSyncHooks - - mu sync.Mutex - when time.Time // when the timer will fire - c chan time.Time // closed when the timer fires; mutually exclusive with f - f func() // called when the timer fires; mutually exclusive with c -} - -func (t *fakeTimer) C() <-chan time.Time { return t.c } - -func (t *fakeTimer) Stop() bool { - t.mu.Lock() - defer t.mu.Unlock() - stopped := t.when.IsZero() - t.when = time.Time{} - return stopped -} - -func (t *fakeTimer) Reset(d time.Duration) bool { - if t.c != nil || t.f == nil { - panic("fakeTimer only supports Reset on AfterFunc timers") - } - t.mu.Lock() - defer t.mu.Unlock() - t.hooks.lock() - defer t.hooks.unlock() - active := !t.when.IsZero() - t.when = t.hooks.now.Add(d) - if !active { - t.hooks.timers = append(t.hooks.timers, t) - } - return active -} diff --git a/vendor/golang.org/x/net/http2/timer.go b/vendor/golang.org/x/net/http2/timer.go new file mode 100644 index 00000000000..0b1c17b8129 --- /dev/null +++ b/vendor/golang.org/x/net/http2/timer.go @@ -0,0 +1,20 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package http2 + +import "time" + +// A timer is a time.Timer, as an interface which can be replaced in tests. +type timer = interface { + C() <-chan time.Time + Reset(d time.Duration) bool + Stop() bool +} + +// timeTimer adapts a time.Timer to the timer interface. +type timeTimer struct { + *time.Timer +} + +func (t timeTimer) C() <-chan time.Time { return t.Timer.C } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index ce375c8c753..61f511f97aa 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -185,7 +185,45 @@ type Transport struct { connPoolOnce sync.Once connPoolOrDef ClientConnPool // non-nil version of ConnPool - syncHooks *testSyncHooks + *transportTestHooks +} + +// Hook points used for testing. +// Outside of tests, t.transportTestHooks is nil and these all have minimal implementations. +// Inside tests, see the testSyncHooks function docs. + +type transportTestHooks struct { + newclientconn func(*ClientConn) + group synctestGroupInterface +} + +func (t *Transport) markNewGoroutine() { + if t != nil && t.transportTestHooks != nil { + t.transportTestHooks.group.Join() + } +} + +// newTimer creates a new time.Timer, or a synthetic timer in tests. +func (t *Transport) newTimer(d time.Duration) timer { + if t.transportTestHooks != nil { + return t.transportTestHooks.group.NewTimer(d) + } + return timeTimer{time.NewTimer(d)} +} + +// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. +func (t *Transport) afterFunc(d time.Duration, f func()) timer { + if t.transportTestHooks != nil { + return t.transportTestHooks.group.AfterFunc(d, f) + } + return timeTimer{time.AfterFunc(d, f)} +} + +func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { + if t.transportTestHooks != nil { + return t.transportTestHooks.group.ContextWithTimeout(ctx, d) + } + return context.WithTimeout(ctx, d) } func (t *Transport) maxHeaderListSize() uint32 { @@ -352,60 +390,6 @@ type ClientConn struct { werr error // first write error that has occurred hbuf bytes.Buffer // HPACK encoder writes into this henc *hpack.Encoder - - syncHooks *testSyncHooks // can be nil -} - -// Hook points used for testing. 
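The new timer.go above collapses the old test-only fakeTimer/testSyncHooks machinery into a small timer interface: production code wraps *time.Timer in timeTimer, and tests can inject a synthetic implementation. A standalone sketch of the same pattern with hypothetical names; only time.Timer's field-versus-method mismatch forces the adapter:

package main

import (
	"fmt"
	"time"
)

// timer mirrors the interface added in timer.go: a time.Timer behind methods.
type timer interface {
	C() <-chan time.Time
	Reset(d time.Duration) bool
	Stop() bool
}

// realTimer adapts *time.Timer. Only C needs a wrapper method, because
// time.Timer exposes its channel as a struct field rather than a method.
type realTimer struct{ *time.Timer }

func (t realTimer) C() <-chan time.Time { return t.Timer.C }

func newTimer(d time.Duration) timer { return realTimer{time.NewTimer(d)} }

func main() {
	t := newTimer(50 * time.Millisecond)
	defer t.Stop()
	<-t.C()
	fmt.Println("fired; a test could inject a fake timer behind the same interface")
}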
-// Outside of tests, cc.syncHooks is nil and these all have minimal implementations. -// Inside tests, see the testSyncHooks function docs. - -// goRun starts a new goroutine. -func (cc *ClientConn) goRun(f func()) { - if cc.syncHooks != nil { - cc.syncHooks.goRun(f) - return - } - go f() -} - -// condBroadcast is cc.cond.Broadcast. -func (cc *ClientConn) condBroadcast() { - if cc.syncHooks != nil { - cc.syncHooks.condBroadcast(cc.cond) - } - cc.cond.Broadcast() -} - -// condWait is cc.cond.Wait. -func (cc *ClientConn) condWait() { - if cc.syncHooks != nil { - cc.syncHooks.condWait(cc.cond) - } - cc.cond.Wait() -} - -// newTimer creates a new time.Timer, or a synthetic timer in tests. -func (cc *ClientConn) newTimer(d time.Duration) timer { - if cc.syncHooks != nil { - return cc.syncHooks.newTimer(d) - } - return newTimeTimer(d) -} - -// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. -func (cc *ClientConn) afterFunc(d time.Duration, f func()) timer { - if cc.syncHooks != nil { - return cc.syncHooks.afterFunc(d, f) - } - return newTimeAfterFunc(d, f) -} - -func (cc *ClientConn) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { - if cc.syncHooks != nil { - return cc.syncHooks.contextWithTimeout(ctx, d) - } - return context.WithTimeout(ctx, d) } // clientStream is the state for a single HTTP/2 stream. One of these @@ -487,7 +471,7 @@ func (cs *clientStream) abortStreamLocked(err error) { // TODO(dneil): Clean up tests where cs.cc.cond is nil. if cs.cc.cond != nil { // Wake up writeRequestBody if it is waiting on flow control. - cs.cc.condBroadcast() + cs.cc.cond.Broadcast() } } @@ -497,7 +481,7 @@ func (cs *clientStream) abortRequestBodyWrite() { defer cc.mu.Unlock() if cs.reqBody != nil && cs.reqBodyClosed == nil { cs.closeReqBodyLocked() - cc.condBroadcast() + cc.cond.Broadcast() } } @@ -507,10 +491,11 @@ func (cs *clientStream) closeReqBodyLocked() { } cs.reqBodyClosed = make(chan struct{}) reqBodyClosed := cs.reqBodyClosed - cs.cc.goRun(func() { + go func() { + cs.cc.t.markNewGoroutine() cs.reqBody.Close() close(reqBodyClosed) - }) + }() } type stickyErrWriter struct { @@ -626,21 +611,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res backoff := float64(uint(1) << (uint(retry) - 1)) backoff += backoff * (0.1 * mathrand.Float64()) d := time.Second * time.Duration(backoff) - var tm timer - if t.syncHooks != nil { - tm = t.syncHooks.newTimer(d) - t.syncHooks.blockUntil(func() bool { - select { - case <-tm.C(): - case <-req.Context().Done(): - default: - return false - } - return true - }) - } else { - tm = newTimeTimer(d) - } + tm := t.newTimer(d) select { case <-tm.C(): t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) @@ -725,8 +696,8 @@ func canRetryError(err error) bool { } func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { - if t.syncHooks != nil { - return t.newClientConn(nil, singleUse, t.syncHooks) + if t.transportTestHooks != nil { + return t.newClientConn(nil, singleUse) } host, _, err := net.SplitHostPort(addr) if err != nil { @@ -736,7 +707,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b if err != nil { return nil, err } - return t.newClientConn(tconn, singleUse, nil) + return t.newClientConn(tconn, singleUse) } func (t *Transport) newTLSConfig(host string) *tls.Config { @@ -802,10 +773,10 @@ func (t *Transport) maxEncoderHeaderTableSize() uint32 { } func (t 
*Transport) NewClientConn(c net.Conn) (*ClientConn, error) { - return t.newClientConn(c, t.disableKeepAlives(), nil) + return t.newClientConn(c, t.disableKeepAlives()) } -func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHooks) (*ClientConn, error) { +func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { cc := &ClientConn{ t: t, tconn: c, @@ -820,16 +791,12 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHoo wantSettingsAck: true, pings: make(map[[8]byte]chan struct{}), reqHeaderMu: make(chan struct{}, 1), - syncHooks: hooks, } - if hooks != nil { - hooks.newclientconn(cc) + if t.transportTestHooks != nil { + t.markNewGoroutine() + t.transportTestHooks.newclientconn(cc) c = cc.tconn } - if d := t.idleConnTimeout(); d != 0 { - cc.idleTimeout = d - cc.idleTimer = cc.afterFunc(d, cc.onIdleTimeout) - } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) } @@ -860,10 +827,6 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHoo cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize()) cc.peerMaxHeaderTableSize = initialHeaderTableSize - if t.AllowHTTP { - cc.nextStreamID = 3 - } - if cs, ok := c.(connectionStater); ok { state := cs.ConnectionState() cc.tlsState = &state @@ -893,7 +856,13 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHoo return nil, cc.werr } - cc.goRun(cc.readLoop) + // Start the idle timer after the connection is fully initialized. + if d := t.idleConnTimeout(); d != 0 { + cc.idleTimeout = d + cc.idleTimer = t.afterFunc(d, cc.onIdleTimeout) + } + + go cc.readLoop() return cc, nil } @@ -901,7 +870,7 @@ func (cc *ClientConn) healthCheck() { pingTimeout := cc.t.pingTimeout() // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. - ctx, cancel := cc.contextWithTimeout(context.Background(), pingTimeout) + ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout) defer cancel() cc.vlogf("http2: Transport sending health check") err := cc.Ping(ctx) @@ -936,7 +905,20 @@ func (cc *ClientConn) setGoAway(f *GoAwayFrame) { } last := f.LastStreamID for streamID, cs := range cc.streams { - if streamID > last { + if streamID <= last { + // The server's GOAWAY indicates that it received this stream. + // It will either finish processing it, or close the connection + // without doing so. Either way, leave the stream alone for now. + continue + } + if streamID == 1 && cc.goAway.ErrCode != ErrCodeNo { + // Don't retry the first stream on a connection if we get a non-NO error. + // If the server is sending an error on a new connection, + // retrying the request on a new one probably isn't going to work. + cs.abortStreamLocked(fmt.Errorf("http2: Transport received GOAWAY from server ErrCode:%v", cc.goAway.ErrCode)) + } else { + // Aborting the stream with errClentConnGotGoAway indicates that + // the request should be retried on a new connection. 
cs.abortStreamLocked(errClientConnGotGoAway) } } @@ -1131,7 +1113,8 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { // Wait for all in-flight streams to complete or connection to close done := make(chan struct{}) cancelled := false // guarded by cc.mu - cc.goRun(func() { + go func() { + cc.t.markNewGoroutine() cc.mu.Lock() defer cc.mu.Unlock() for { @@ -1143,9 +1126,9 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { if cancelled { break } - cc.condWait() + cc.cond.Wait() } - }) + }() shutdownEnterWaitStateHook() select { case <-done: @@ -1155,7 +1138,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { cc.mu.Lock() // Free the goroutine above cancelled = true - cc.condBroadcast() + cc.cond.Broadcast() cc.mu.Unlock() return ctx.Err() } @@ -1193,7 +1176,7 @@ func (cc *ClientConn) closeForError(err error) { for _, cs := range cc.streams { cs.abortStreamLocked(err) } - cc.condBroadcast() + cc.cond.Broadcast() cc.mu.Unlock() cc.closeConn() } @@ -1308,23 +1291,30 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) respHeaderRecv: make(chan struct{}), donec: make(chan struct{}), } - cc.goRun(func() { - cs.doRequest(req) - }) + + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? + if !cc.t.disableCompression() && + req.Header.Get("Accept-Encoding") == "" && + req.Header.Get("Range") == "" && + !cs.isHead { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: https://zlib.net/zlib_faq.html#faq39 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. See https://golang.org/issue/8923 + cs.requestedGzip = true + } + + go cs.doRequest(req, streamf) waitDone := func() error { - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-cs.donec: - case <-ctx.Done(): - case <-cs.reqCancel: - default: - return false - } - return true - }) - } select { case <-cs.donec: return nil @@ -1385,24 +1375,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) return err } - if streamf != nil { - streamf(cs) - } - for { - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-cs.respHeaderRecv: - case <-cs.abort: - case <-ctx.Done(): - case <-cs.reqCancel: - default: - return false - } - return true - }) - } select { case <-cs.respHeaderRecv: return handleResponseHeaders() @@ -1432,8 +1405,9 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) // doRequest runs for the duration of the request lifetime. // // It sends the request and performs post-request cleanup (closing Request.Body, etc.). -func (cs *clientStream) doRequest(req *http.Request) { - err := cs.writeRequest(req) +func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)) { + cs.cc.t.markNewGoroutine() + err := cs.writeRequest(req, streamf) cs.cleanupWriteRequest(err) } @@ -1444,7 +1418,7 @@ func (cs *clientStream) doRequest(req *http.Request) { // // It returns non-nil if the request ends otherwise. // If the returned error is StreamError, the error Code may be used in resetting the stream. 
-func (cs *clientStream) writeRequest(req *http.Request) (err error) { +func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStream)) (err error) { cc := cs.cc ctx := cs.ctx @@ -1458,21 +1432,6 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { if cc.reqHeaderMu == nil { panic("RoundTrip on uninitialized ClientConn") // for tests } - var newStreamHook func(*clientStream) - if cc.syncHooks != nil { - newStreamHook = cc.syncHooks.newstream - cc.syncHooks.blockUntil(func() bool { - select { - case cc.reqHeaderMu <- struct{}{}: - <-cc.reqHeaderMu - case <-cs.reqCancel: - case <-ctx.Done(): - default: - return false - } - return true - }) - } select { case cc.reqHeaderMu <- struct{}{}: case <-cs.reqCancel: @@ -1497,28 +1456,8 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { } cc.mu.Unlock() - if newStreamHook != nil { - newStreamHook(cs) - } - - // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? - if !cc.t.disableCompression() && - req.Header.Get("Accept-Encoding") == "" && - req.Header.Get("Range") == "" && - !cs.isHead { - // Request gzip only, not deflate. Deflate is ambiguous and - // not as universally supported anyway. - // See: https://zlib.net/zlib_faq.html#faq39 - // - // Note that we don't request this for HEAD requests, - // due to a bug in nginx: - // http://trac.nginx.org/nginx/ticket/358 - // https://golang.org/issue/5522 - // - // We don't request gzip if the request is for a range, since - // auto-decoding a portion of a gzipped document will just fail - // anyway. See https://golang.org/issue/8923 - cs.requestedGzip = true + if streamf != nil { + streamf(cs) } continueTimeout := cc.t.expectContinueTimeout() @@ -1581,7 +1520,7 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { var respHeaderTimer <-chan time.Time var respHeaderRecv chan struct{} if d := cc.responseHeaderTimeout(); d != 0 { - timer := cc.newTimer(d) + timer := cc.t.newTimer(d) defer timer.Stop() respHeaderTimer = timer.C() respHeaderRecv = cs.respHeaderRecv @@ -1590,21 +1529,6 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { // or until the request is aborted (via context, error, or otherwise), // whichever comes first. for { - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-cs.peerClosed: - case <-respHeaderTimer: - case <-respHeaderRecv: - case <-cs.abort: - case <-ctx.Done(): - case <-cs.reqCancel: - default: - return false - } - return true - }) - } select { case <-cs.peerClosed: return nil @@ -1753,7 +1677,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { return nil } cc.pendingRequests++ - cc.condWait() + cc.cond.Wait() cc.pendingRequests-- select { case <-cs.abort: @@ -2015,7 +1939,7 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) cs.flow.take(take) return take, nil } - cc.condWait() + cc.cond.Wait() } } @@ -2298,7 +2222,7 @@ func (cc *ClientConn) forgetStreamID(id uint32) { } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. - cc.condBroadcast() + cc.cond.Broadcast() closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 { @@ -2320,6 +2244,7 @@ type clientConnReadLoop struct { // readLoop runs in its own goroutine and reads and dispatches frames. 
func (cc *ClientConn) readLoop() { + cc.t.markNewGoroutine() rl := &clientConnReadLoop{cc: cc} defer rl.cleanup() cc.readerErr = rl.run() @@ -2386,7 +2311,7 @@ func (rl *clientConnReadLoop) cleanup() { cs.abortStreamLocked(err) } } - cc.condBroadcast() + cc.cond.Broadcast() cc.mu.Unlock() } @@ -2423,7 +2348,7 @@ func (rl *clientConnReadLoop) run() error { readIdleTimeout := cc.t.ReadIdleTimeout var t timer if readIdleTimeout != 0 { - t = cc.afterFunc(readIdleTimeout, cc.healthCheck) + t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck) } for { f, err := cc.fr.ReadFrame() @@ -3021,7 +2946,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { for _, cs := range cc.streams { cs.flow.add(delta) } - cc.condBroadcast() + cc.cond.Broadcast() cc.initialWindowSize = s.Val case SettingHeaderTableSize: @@ -3076,7 +3001,7 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { return ConnectionError(ErrCodeFlowControl) } - cc.condBroadcast() + cc.cond.Broadcast() return nil } @@ -3120,7 +3045,8 @@ func (cc *ClientConn) Ping(ctx context.Context) error { } var pingError error errc := make(chan struct{}) - cc.goRun(func() { + go func() { + cc.t.markNewGoroutine() cc.wmu.Lock() defer cc.wmu.Unlock() if pingError = cc.fr.WritePing(false, p); pingError != nil { @@ -3131,20 +3057,7 @@ func (cc *ClientConn) Ping(ctx context.Context) error { close(errc) return } - }) - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-c: - case <-errc: - case <-ctx.Done(): - case <-cc.readerDone: - default: - return false - } - return true - }) - } + }() select { case <-c: return nil diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go index 0a242c669e2..f6783339d11 100644 --- a/vendor/golang.org/x/net/http2/writesched_priority.go +++ b/vendor/golang.org/x/net/http2/writesched_priority.go @@ -443,8 +443,8 @@ func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, max } func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { - for k := n.kids; k != nil; k = k.next { - k.setParent(n.parent) + for n.kids != nil { + n.kids.setParent(n.parent) } n.setParent(nil) delete(ws.nodes, n.id) diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go index 573fe79e86e..d7d4b8b6e35 100644 --- a/vendor/golang.org/x/net/proxy/per_host.go +++ b/vendor/golang.org/x/net/proxy/per_host.go @@ -137,9 +137,7 @@ func (p *PerHost) AddNetwork(net *net.IPNet) { // AddZone specifies a DNS suffix that will use the bypass proxy. A zone of // "example.com" matches "example.com" and all of its subdomains. func (p *PerHost) AddZone(zone string) { - if strings.HasSuffix(zone, ".") { - zone = zone[:len(zone)-1] - } + zone = strings.TrimSuffix(zone, ".") if !strings.HasPrefix(zone, ".") { zone = "." + zone } @@ -148,8 +146,6 @@ func (p *PerHost) AddZone(zone string) { // AddHost specifies a host name that will use the bypass proxy. 
func (p *PerHost) AddHost(host string) { - if strings.HasSuffix(host, ".") { - host = host[:len(host)-1] - } + host = strings.TrimSuffix(host, ".") p.bypassHosts = append(p.bypassHosts, host) } diff --git a/vendor/golang.org/x/net/websocket/hybi.go b/vendor/golang.org/x/net/websocket/hybi.go index 48a069e1903..dda7434666e 100644 --- a/vendor/golang.org/x/net/websocket/hybi.go +++ b/vendor/golang.org/x/net/websocket/hybi.go @@ -16,7 +16,6 @@ import ( "encoding/binary" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strings" @@ -279,7 +278,7 @@ func (handler *hybiFrameHandler) HandleFrame(frame frameReader) (frameReader, er } } if header := frame.HeaderReader(); header != nil { - io.Copy(ioutil.Discard, header) + io.Copy(io.Discard, header) } switch frame.PayloadType() { case ContinuationFrame: @@ -294,7 +293,7 @@ func (handler *hybiFrameHandler) HandleFrame(frame frameReader) (frameReader, er if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { return nil, err } - io.Copy(ioutil.Discard, frame) + io.Copy(io.Discard, frame) if frame.PayloadType() == PingFrame { if _, err := handler.WritePong(b[:n]); err != nil { return nil, err diff --git a/vendor/golang.org/x/net/websocket/websocket.go b/vendor/golang.org/x/net/websocket/websocket.go index 90a2257cd54..923a5780ec5 100644 --- a/vendor/golang.org/x/net/websocket/websocket.go +++ b/vendor/golang.org/x/net/websocket/websocket.go @@ -17,7 +17,6 @@ import ( "encoding/json" "errors" "io" - "io/ioutil" "net" "net/http" "net/url" @@ -208,7 +207,7 @@ again: n, err = ws.frameReader.Read(msg) if err == io.EOF { if trailer := ws.frameReader.TrailerReader(); trailer != nil { - io.Copy(ioutil.Discard, trailer) + io.Copy(io.Discard, trailer) } ws.frameReader = nil goto again @@ -330,7 +329,7 @@ func (cd Codec) Receive(ws *Conn, v interface{}) (err error) { ws.rio.Lock() defer ws.rio.Unlock() if ws.frameReader != nil { - _, err = io.Copy(ioutil.Discard, ws.frameReader) + _, err = io.Copy(io.Discard, ws.frameReader) if err != nil { return err } @@ -362,7 +361,7 @@ again: return ErrFrameTooLarge } payloadType := frame.PayloadType() - data, err := ioutil.ReadAll(frame) + data, err := io.ReadAll(frame) if err != nil { return err } diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE index 6a66aea5eaf..2a7cf70da6e 100644 --- a/vendor/golang.org/x/oauth2/LICENSE +++ b/vendor/golang.org/x/oauth2/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
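
[Editor's note, not part of the patch] The per_host.go and websocket hunks above swap hand-rolled suffix trimming and the deprecated io/ioutil helpers for their modern standard-library equivalents. A minimal, self-contained sketch of the idioms the vendored code now relies on (strings.TrimSuffix, io.Discard, io.ReadAll; the literals are illustrative only):

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	// strings.TrimSuffix replaces the manual HasSuffix + re-slice pattern
	// that AddZone/AddHost used before this update.
	host := strings.TrimSuffix("example.com.", ".")
	fmt.Println(host) // example.com

	// io.Copy(io.Discard, r) drains a reader; io.Discard (Go 1.16+)
	// supersedes ioutil.Discard used by the websocket frame handlers.
	if _, err := io.Copy(io.Discard, strings.NewReader("frame trailer")); err != nil {
		fmt.Println("drain error:", err)
	}

	// io.ReadAll (Go 1.16+) supersedes ioutil.ReadAll.
	data, err := io.ReadAll(strings.NewReader("payload"))
	if err != nil {
		fmt.Println("read error:", err)
	}
	fmt.Println(string(data))
}
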
diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go index ba931c2c3de..7b82e7a0837 100644 --- a/vendor/golang.org/x/oauth2/google/google.go +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -252,7 +252,10 @@ func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsPar // Further information about retrieving access tokens from the GCE metadata // server can be found at https://cloud.google.com/compute/docs/authentication. func ComputeTokenSource(account string, scope ...string) oauth2.TokenSource { - return computeTokenSource(account, 0, scope...) + // refresh 3 minutes and 45 seconds early. The shortest MDS cache is currently 4 minutes, so any + // refreshes earlier are a waste of compute. + earlyExpirySecs := 225 * time.Second + return computeTokenSource(account, earlyExpirySecs, scope...) } func computeTokenSource(account string, earlyExpiry time.Duration, scope ...string) oauth2.TokenSource { diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE index 6a66aea5eaf..2a7cf70da6e 100644 --- a/vendor/golang.org/x/sync/LICENSE +++ b/vendor/golang.org/x/sync/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
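
[Editor's note, not part of the patch] The google.go hunk above makes ComputeTokenSource refresh metadata-server tokens 3 minutes 45 seconds (225s) before expiry, because the shortest MDS cache is currently about 4 minutes. A hedged usage sketch of how a caller obtains such a token source; the account name, scope, and surrounding program are assumptions, and it only works where a GCE/GKE metadata server is reachable:

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

func main() {
	// "default" selects the instance's default service account; the
	// cloud-platform scope is only an example.
	ts := google.ComputeTokenSource("default",
		"https://www.googleapis.com/auth/cloud-platform")

	// With the patched early-expiry logic, Token() returns a cached token
	// until roughly 3m45s before the metadata server would expire it.
	tok, err := ts.Token()
	if err != nil {
		log.Fatalf("fetching token from metadata server: %v", err)
	}
	fmt.Println("access token expires at:", tok.Expiry)

	// The same source can back an authenticated HTTP client.
	client := oauth2.NewClient(context.Background(), ts)
	_ = client
}
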
diff --git a/vendor/golang.org/x/sys/unix/mremap.go b/vendor/golang.org/x/sys/unix/mremap.go index fd45fe529da..3a5e776f895 100644 --- a/vendor/golang.org/x/sys/unix/mremap.go +++ b/vendor/golang.org/x/sys/unix/mremap.go @@ -50,3 +50,8 @@ func (m *mremapMmapper) Mremap(oldData []byte, newLength int, flags int) (data [ func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) { return mapper.Mremap(oldData, newLength, flags) } + +func MremapPtr(oldAddr unsafe.Pointer, oldSize uintptr, newAddr unsafe.Pointer, newSize uintptr, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mremap(uintptr(oldAddr), oldSize, newSize, flags, uintptr(newAddr)) + return unsafe.Pointer(xaddr), err +} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 59542a897d2..4cc7b005967 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -542,6 +542,18 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { } } +//sys pthread_chdir_np(path string) (err error) + +func PthreadChdir(path string) (err error) { + return pthread_chdir_np(path) +} + +//sys pthread_fchdir_np(fd int) (err error) + +func PthreadFchdir(fd int) (err error) { + return pthread_fchdir_np(fd) +} + //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 77081de8c7d..4e92e5aa406 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -154,6 +154,15 @@ func Munmap(b []byte) (err error) { return mapper.Munmap(b) } +func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mmap(uintptr(addr), length, prot, flags, fd, offset) + return unsafe.Pointer(xaddr), err +} + +func MunmapPtr(addr unsafe.Pointer, length uintptr) (err error) { + return mapper.munmap(uintptr(addr), length) +} + func Read(fd int, p []byte) (n int, err error) { n, err = read(fd, p) if raceenabled { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index ccb02f240a4..07642c308d3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -760,6 +760,39 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pthread_chdir_np(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_pthread_chdir_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_chdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_chdir_np pthread_chdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pthread_fchdir_np(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_pthread_fchdir_np_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_fchdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_fchdir_np pthread_fchdir_np "/usr/lib/libSystem.B.dylib" + +// 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 8b8bb284028..923e08cb792 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -228,6 +228,16 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_pthread_chdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_chdir_np(SB) +GLOBL ·libc_pthread_chdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_chdir_np_trampoline_addr(SB)/8, $libc_pthread_chdir_np_trampoline<>(SB) + +TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_fchdir_np(SB) +GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 1b40b997b52..7d73dda6473 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -760,6 +760,39 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pthread_chdir_np(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_pthread_chdir_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_chdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_chdir_np pthread_chdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pthread_fchdir_np(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_pthread_fchdir_np_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_fchdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_fchdir_np pthread_fchdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 08362c1ab74..057700111e7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -228,6 +228,16 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT 
libc_pthread_chdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_chdir_np(SB) +GLOBL ·libc_pthread_chdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_chdir_np_trampoline_addr(SB)/8, $libc_pthread_chdir_np_trampoline<>(SB) + +TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_fchdir_np(SB) +GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index 6f7d2ac70a9..97651b5bd04 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -894,7 +894,7 @@ type ACL struct { aclRevision byte sbz1 byte aclSize uint16 - aceCount uint16 + AceCount uint16 sbz2 uint16 } @@ -1087,6 +1087,27 @@ type EXPLICIT_ACCESS struct { Trustee TRUSTEE } +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header +type ACE_HEADER struct { + AceType uint8 + AceFlags uint8 + AceSize uint16 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-access_allowed_ace +type ACCESS_ALLOWED_ACE struct { + Header ACE_HEADER + Mask ACCESS_MASK + SidStart uint32 +} + +const ( + // Constants for AceType + // https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header + ACCESS_ALLOWED_ACE_TYPE = 0 + ACCESS_DENIED_ACE_TYPE = 1 +) + // This type is the union inside of TRUSTEE and must be created using one of the TrusteeValueFrom* functions. type TrusteeValue uintptr @@ -1158,6 +1179,7 @@ type OBJECTS_AND_NAME struct { //sys makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) = advapi32.MakeSelfRelativeSD //sys setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) = advapi32.SetEntriesInAclW +//sys GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (ret error) = advapi32.GetAce // Control returns the security descriptor control bits. 
func (sd *SECURITY_DESCRIPTOR) Control() (control SECURITY_DESCRIPTOR_CONTROL, revision uint32, err error) { diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 9f73df75b5f..eba761018aa 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -91,6 +91,7 @@ var ( procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") procEqualSid = modadvapi32.NewProc("EqualSid") procFreeSid = modadvapi32.NewProc("FreeSid") + procGetAce = modadvapi32.NewProc("GetAce") procGetLengthSid = modadvapi32.NewProc("GetLengthSid") procGetNamedSecurityInfoW = modadvapi32.NewProc("GetNamedSecurityInfoW") procGetSecurityDescriptorControl = modadvapi32.NewProc("GetSecurityDescriptorControl") @@ -1224,6 +1225,14 @@ func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCE return } +func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (ret error) { + r0, _, _ := syscall.Syscall(procGetAce.Addr(), 3, uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce))) + if r0 == 0 { + ret = GetLastError() + } + return +} + func SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) { r1, _, e1 := syscall.Syscall(procSetKernelObjectSecurity.Addr(), 3, uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor))) if r1 == 0 { diff --git a/vendor/golang.org/x/text/message/message.go b/vendor/golang.org/x/text/message/message.go index 48d76630caa..91a97264214 100644 --- a/vendor/golang.org/x/text/message/message.go +++ b/vendor/golang.org/x/text/message/message.go @@ -138,21 +138,20 @@ func (p *Printer) Printf(key Reference, a ...interface{}) (n int, err error) { func lookupAndFormat(p *printer, r Reference, a []interface{}) { p.fmt.Reset(a) - var id, msg string switch v := r.(type) { case string: - id, msg = v, v + if p.catContext.Execute(v) == catalog.ErrNotFound { + p.Render(v) + return + } case key: - id, msg = v.id, v.fallback - default: - panic("key argument is not a Reference") - } - - if p.catContext.Execute(id) == catalog.ErrNotFound { - if p.catContext.Execute(msg) == catalog.ErrNotFound { - p.Render(msg) + if p.catContext.Execute(v.id) == catalog.ErrNotFound && + p.catContext.Execute(v.fallback) == catalog.ErrNotFound { + p.Render(v.fallback) return } + default: + panic("key argument is not a Reference") } } diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go index 2c4c4e23289..6e34df46130 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go @@ -106,8 +106,21 @@ func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Nod // Does augmented child strictly contain [start, end)? if augPos <= start && end <= augEnd { - _, isToken := child.(tokenNode) - return isToken || visit(child) + if is[tokenNode](child) { + return true + } + + // childrenOf elides the FuncType node beneath FuncDecl. + // Add it back here for TypeParams, Params, Results, + // all FieldLists). But we don't add it back for the "func" token + // even though it is is the tree at FuncDecl.Type.Func. 
+ if decl, ok := node.(*ast.FuncDecl); ok { + if fields, ok := child.(*ast.FieldList); ok && fields != decl.Recv { + path = append(path, decl.Type) + } + } + + return visit(child) } // Does [start, end) overlap multiple children? @@ -313,6 +326,8 @@ func childrenOf(n ast.Node) []ast.Node { // // As a workaround, we inline the case for FuncType // here and order things correctly. + // We also need to insert the elided FuncType just + // before the 'visit' recursion. // children = nil // discard ast.Walk(FuncDecl) info subtrees children = append(children, tok(n.Type.Func, len("func"))) @@ -632,3 +647,8 @@ func NodeDescription(n ast.Node) string { } panic(fmt.Sprintf("unexpected node type: %T", n)) } + +func is[T any](x any) bool { + _, ok := x.(T) + return ok +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/util.go b/vendor/golang.org/x/tools/go/ast/astutil/util.go index 919d5305ab4..6bdcf70ac27 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/util.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/util.go @@ -7,6 +7,7 @@ package astutil import "go/ast" // Unparen returns e with any enclosing parentheses stripped. +// TODO(adonovan): use go1.22's ast.Unparen. func Unparen(e ast.Expr) ast.Expr { for { p, ok := e.(*ast.ParenExpr) diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go deleted file mode 100644 index 333676b7cfc..00000000000 --- a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package packagesdriver fetches type sizes for go/packages and go/analysis. -package packagesdriver - -import ( - "context" - "fmt" - "strings" - - "golang.org/x/tools/internal/gocommand" -) - -func GetSizesForArgsGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) { - inv.Verb = "list" - inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"} - stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv) - var goarch, compiler string - if rawErr != nil { - rawErrMsg := rawErr.Error() - if strings.Contains(rawErrMsg, "cannot find main module") || - strings.Contains(rawErrMsg, "go.mod file not found") { - // User's running outside of a module. - // All bets are off. Get GOARCH and guess compiler is gc. - // TODO(matloob): Is this a problem in practice? - inv.Verb = "env" - inv.Args = []string{"GOARCH"} - envout, enverr := gocmdRunner.Run(ctx, inv) - if enverr != nil { - return "", "", enverr - } - goarch = strings.TrimSpace(envout.String()) - compiler = "gc" - } else if friendlyErr != nil { - return "", "", friendlyErr - } else { - // This should be unreachable, but be defensive - // in case RunRaw's error results are inconsistent. 
- return "", "", rawErr - } - } else { - fields := strings.Fields(stdout.String()) - if len(fields) < 2 { - return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", - stdout.String(), stderr.String()) - } - goarch = fields[0] - compiler = fields[1] - } - return compiler, goarch, nil -} diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index a8d7b06ac09..3531ac8f5fc 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -198,14 +198,6 @@ Instead, ssadump no longer requests the runtime package, but seeks it among the dependencies of the user-specified packages, and emits an error if it is not found. -Overlays: The Overlay field in the Config allows providing alternate contents -for Go source files, by providing a mapping from file path to contents. -go/packages will pull in new imports added in overlay files when go/packages -is run in LoadImports mode or greater. -Overlay support for the go list driver isn't complete yet: if the file doesn't -exist on disk, it will only be recognized in an overlay if it is a non-test file -and the package would be reported even without the overlay. - Questions & Tasks - Add GOARCH/GOOS? diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go index 4335c1eb14c..c2b4b711b59 100644 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -34,8 +34,8 @@ type DriverRequest struct { // Tests specifies whether the patterns should also return test packages. Tests bool `json:"tests"` - // Overlay maps file paths (relative to the driver's working directory) to the byte contents - // of overlay files. + // Overlay maps file paths (relative to the driver's working directory) + // to the contents of overlay files (see Config.Overlay). Overlay map[string][]byte `json:"overlay"` } @@ -119,7 +119,19 @@ func findExternalDriver(cfg *Config) driver { stderr := new(bytes.Buffer) cmd := exec.CommandContext(cfg.Context, tool, words...) cmd.Dir = cfg.Dir - cmd.Env = cfg.Env + // The cwd gets resolved to the real path. On Darwin, where + // /tmp is a symlink, this breaks anything that expects the + // working directory to keep the original path, including the + // go command when dealing with modules. + // + // os.Getwd stdlib has a special feature where if the + // cwd and the PWD are the same node then it trusts + // the PWD, so by setting it in the env for the child + // process we fix up all the paths returned by the go + // command. + // + // (See similar trick in Invocation.run in ../../internal/gocommand/invoke.go) + cmd.Env = append(slicesClip(cfg.Env), "PWD="+cfg.Dir) cmd.Stdin = bytes.NewReader(req) cmd.Stdout = buf cmd.Stderr = stderr @@ -138,3 +150,7 @@ func findExternalDriver(cfg *Config) driver { return &response, nil } } + +// slicesClip removes unused capacity from the slice, returning s[:len(s):len(s)]. +// TODO(adonovan): use go1.21 slices.Clip. 
+func slicesClip[S ~[]E, E any](s S) S { return s[:len(s):len(s)] } diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 22305d9c90a..1a3a5b44f5c 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -21,7 +21,6 @@ import ( "sync" "unicode" - "golang.org/x/tools/go/internal/packagesdriver" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" ) @@ -149,7 +148,7 @@ func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { errCh := make(chan error) go func() { - compiler, arch, err := packagesdriver.GetSizesForArgsGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) + compiler, arch, err := getSizesForArgs(ctx, state.cfgInvocation(), cfg.gocmdRunner) response.dr.Compiler = compiler response.dr.Arch = arch errCh <- err @@ -841,6 +840,7 @@ func (state *golistState) cfgInvocation() gocommand.Invocation { Env: cfg.Env, Logf: cfg.Logf, WorkingDir: cfg.Dir, + Overlay: cfg.goListOverlayFile, } } @@ -849,26 +849,6 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, cfg := state.cfg inv := state.cfgInvocation() - - // For Go versions 1.16 and above, `go list` accepts overlays directly via - // the -overlay flag. Set it, if it's available. - // - // The check for "list" is not necessarily required, but we should avoid - // getting the go version if possible. - if verb == "list" { - goVersion, err := state.getGoVersion() - if err != nil { - return nil, err - } - if goVersion >= 16 { - filename, cleanup, err := state.writeOverlays() - if err != nil { - return nil, err - } - defer cleanup() - inv.Overlay = filename - } - } inv.Verb = verb inv.Args = args gocmdRunner := cfg.gocmdRunner @@ -1015,67 +995,6 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, return stdout, nil } -// OverlayJSON is the format overlay files are expected to be in. -// The Replace map maps from overlaid paths to replacement paths: -// the Go command will forward all reads trying to open -// each overlaid path to its replacement path, or consider the overlaid -// path not to exist if the replacement path is empty. -// -// From golang/go#39958. -type OverlayJSON struct { - Replace map[string]string `json:"replace,omitempty"` -} - -// writeOverlays writes out files for go list's -overlay flag, as described -// above. -func (state *golistState) writeOverlays() (filename string, cleanup func(), err error) { - // Do nothing if there are no overlays in the config. - if len(state.cfg.Overlay) == 0 { - return "", func() {}, nil - } - dir, err := os.MkdirTemp("", "gopackages-*") - if err != nil { - return "", nil, err - } - // The caller must clean up this directory, unless this function returns an - // error. - cleanup = func() { - os.RemoveAll(dir) - } - defer func() { - if err != nil { - cleanup() - } - }() - overlays := map[string]string{} - for k, v := range state.cfg.Overlay { - // Create a unique filename for the overlaid files, to avoid - // creating nested directories. 
- noSeparator := strings.Join(strings.Split(filepath.ToSlash(k), "/"), "") - f, err := os.CreateTemp(dir, fmt.Sprintf("*-%s", noSeparator)) - if err != nil { - return "", func() {}, err - } - if _, err := f.Write(v); err != nil { - return "", func() {}, err - } - if err := f.Close(); err != nil { - return "", func() {}, err - } - overlays[k] = f.Name() - } - b, err := json.Marshal(OverlayJSON{Replace: overlays}) - if err != nil { - return "", func() {}, err - } - // Write out the overlay file that contains the filepath mappings. - filename = filepath.Join(dir, "overlay.json") - if err := os.WriteFile(filename, b, 0665); err != nil { - return "", func() {}, err - } - return filename, cleanup, nil -} - func containsGoFile(s []string) bool { for _, f := range s { if strings.HasSuffix(f, ".go") { @@ -1104,3 +1023,44 @@ func cmdDebugStr(cmd *exec.Cmd) string { } return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " ")) } + +// getSizesForArgs queries 'go list' for the appropriate +// Compiler and GOARCH arguments to pass to [types.SizesFor]. +func getSizesForArgs(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) { + inv.Verb = "list" + inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"} + stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv) + var goarch, compiler string + if rawErr != nil { + rawErrMsg := rawErr.Error() + if strings.Contains(rawErrMsg, "cannot find main module") || + strings.Contains(rawErrMsg, "go.mod file not found") { + // User's running outside of a module. + // All bets are off. Get GOARCH and guess compiler is gc. + // TODO(matloob): Is this a problem in practice? + inv.Verb = "env" + inv.Args = []string{"GOARCH"} + envout, enverr := gocmdRunner.Run(ctx, inv) + if enverr != nil { + return "", "", enverr + } + goarch = strings.TrimSpace(envout.String()) + compiler = "gc" + } else if friendlyErr != nil { + return "", "", friendlyErr + } else { + // This should be unreachable, but be defensive + // in case RunRaw's error results are inconsistent. + return "", "", rawErr + } + } else { + fields := strings.Fields(stdout.String()) + if len(fields) < 2 { + return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", + stdout.String(), stderr.String()) + } + goarch = fields[0] + compiler = fields[1] + } + return compiler, goarch, nil +} diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 865d90597a9..34306ddd390 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -37,10 +37,20 @@ import ( // A LoadMode controls the amount of detail to return when loading. // The bits below can be combined to specify which fields should be // filled in the result packages. +// // The zero value is a special case, equivalent to combining // the NeedName, NeedFiles, and NeedCompiledGoFiles bits. +// // ID and Errors (if present) will always be filled. -// Load may return more information than requested. +// [Load] may return more information than requested. 
+// +// Unfortunately there are a number of open bugs related to +// interactions among the LoadMode bits: +// - https://github.com/golang/go/issues/48226 +// - https://github.com/golang/go/issues/56633 +// - https://github.com/golang/go/issues/56677 +// - https://github.com/golang/go/issues/58726 +// - https://github.com/golang/go/issues/63517 type LoadMode int const ( @@ -123,15 +133,21 @@ const ( // A Config specifies details about how packages should be loaded. // The zero value is a valid configuration. +// // Calls to Load do not modify this struct. +// +// TODO(adonovan): #67702: this is currently false: in fact, +// calls to [Load] do not modify the public fields of this struct, but +// may modify hidden fields, so concurrent calls to [Load] must not +// use the same Config. But perhaps we should reestablish the +// documented invariant. type Config struct { // Mode controls the level of information returned for each package. Mode LoadMode // Context specifies the context for the load operation. - // If the context is cancelled, the loader may stop early - // and return an ErrCancelled error. - // If Context is nil, the load cannot be cancelled. + // Cancelling the context may cause [Load] to abort and + // return an error. Context context.Context // Logf is the logger for the config. @@ -200,13 +216,23 @@ type Config struct { // setting Tests may have no effect. Tests bool - // Overlay provides a mapping of absolute file paths to file contents. - // If the file with the given path already exists, the parser will use the - // alternative file contents provided by the map. + // Overlay is a mapping from absolute file paths to file contents. // - // Overlays provide incomplete support for when a given file doesn't - // already exist on disk. See the package doc above for more details. + // For each map entry, [Load] uses the alternative file + // contents provided by the overlay mapping instead of reading + // from the file system. This mechanism can be used to enable + // editor-integrated tools to correctly analyze the contents + // of modified but unsaved buffers, for example. + // + // The overlay mapping is passed to the build system's driver + // (see "The driver protocol") so that it too can report + // consistent package metadata about unsaved files. However, + // drivers may vary in their level of support for overlays. Overlay map[string][]byte + + // goListOverlayFile is the JSON file that encodes the Overlay + // mapping, used by 'go list -overlay=...' + goListOverlayFile string } // Load loads and returns the Go packages named by the given patterns. @@ -214,8 +240,22 @@ type Config struct { // Config specifies loading options; // nil behaves the same as an empty Config. // -// Load returns an error if any of the patterns was invalid -// as defined by the underlying build system. +// The [Config.Mode] field is a set of bits that determine what kinds +// of information should be computed and returned. Modes that require +// more information tend to be slower. See [LoadMode] for details +// and important caveats. Its zero value is equivalent to +// NeedName | NeedFiles | NeedCompiledGoFiles. +// +// Each call to Load returns a new set of [Package] instances. +// The Packages and their Imports form a directed acyclic graph. +// +// If the [NeedTypes] mode flag was set, each call to Load uses a new +// [types.Importer], so [types.Object] and [types.Type] values from +// different calls to Load must not be mixed as they will have +// inconsistent notions of type identity. 
+// +// If any of the patterns was invalid as defined by the +// underlying build system, Load returns an error. // It may return an empty list of packages without an error, // for instance for an empty expansion of a valid wildcard. // Errors associated with a particular package are recorded in the @@ -287,6 +327,17 @@ func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, erro // (fall through) } + // go list fallback + // + // Write overlays once, as there are many calls + // to 'go list' (one per chunk plus others too). + overlay, cleanupOverlay, err := gocommand.WriteOverlays(cfg.Overlay) + if err != nil { + return nil, false, err + } + defer cleanupOverlay() + cfg.goListOverlayFile = overlay + response, err := callDriverOnChunks(goListDriver, cfg, chunks) if err != nil { return nil, false, err @@ -366,6 +417,9 @@ func mergeResponses(responses ...*DriverResponse) *DriverResponse { } // A Package describes a loaded Go package. +// +// It also defines part of the JSON schema of [DriverResponse]. +// See the package documentation for an overview. type Package struct { // ID is a unique identifier for a package, // in a syntax provided by the underlying build system. @@ -424,19 +478,30 @@ type Package struct { // to corresponding loaded Packages. Imports map[string]*Package + // Module is the module information for the package if it exists. + // + // Note: it may be missing for std and cmd; see Go issue #65816. + Module *Module + + // -- The following fields are not part of the driver JSON schema. -- + // Types provides type information for the package. // The NeedTypes LoadMode bit sets this field for packages matching the // patterns; type information for dependencies may be missing or incomplete, // unless NeedDeps and NeedImports are also set. - Types *types.Package + // + // Each call to [Load] returns a consistent set of type + // symbols, as defined by the comment at [types.Identical]. + // Avoid mixing type information from two or more calls to [Load]. + Types *types.Package `json:"-"` // Fset provides position information for Types, TypesInfo, and Syntax. // It is set only when Types is set. - Fset *token.FileSet + Fset *token.FileSet `json:"-"` // IllTyped indicates whether the package or any dependency contains errors. // It is set only when Types is set. - IllTyped bool + IllTyped bool `json:"-"` // Syntax is the package's syntax trees, for the files listed in CompiledGoFiles. // @@ -446,26 +511,28 @@ type Package struct { // // Syntax is kept in the same order as CompiledGoFiles, with the caveat that nils are // removed. If parsing returned nil, Syntax may be shorter than CompiledGoFiles. - Syntax []*ast.File + Syntax []*ast.File `json:"-"` // TypesInfo provides type information about the package's syntax trees. // It is set only when Syntax is set. - TypesInfo *types.Info + TypesInfo *types.Info `json:"-"` // TypesSizes provides the effective size function for types in TypesInfo. - TypesSizes types.Sizes + TypesSizes types.Sizes `json:"-"` + + // -- internal -- // forTest is the package under test, if any. forTest string // depsErrors is the DepsErrors field from the go list response, if any. depsErrors []*packagesinternal.PackageError - - // module is the module information for the package if it exists. - Module *Module } // Module provides module information for a package. +// +// It also defines part of the JSON schema of [DriverResponse]. +// See the package documentation for an overview. 
type Module struct { Path string // module path Version string // module version @@ -598,6 +665,7 @@ func (p *Package) UnmarshalJSON(b []byte) error { OtherFiles: flat.OtherFiles, EmbedFiles: flat.EmbedFiles, EmbedPatterns: flat.EmbedPatterns, + IgnoredFiles: flat.IgnoredFiles, ExportFile: flat.ExportFile, } if len(flat.Imports) > 0 { @@ -854,6 +922,12 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { wg.Wait() } + // If the context is done, return its error and + // throw out [likely] incomplete packages. + if err := ld.Context.Err(); err != nil { + return nil, err + } + result := make([]*Package, len(initial)) for i, lpkg := range initial { result[i] = lpkg.Package @@ -949,6 +1023,14 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { lpkg.Types = types.NewPackage(lpkg.PkgPath, lpkg.Name) lpkg.Fset = ld.Fset + // Start shutting down if the context is done and do not load + // source or export data files. + // Packages that import this one will have ld.Context.Err() != nil. + // ld.Context.Err() will be returned later by refine. + if ld.Context.Err() != nil { + return + } + // Subtle: we populate all Types fields with an empty Package // before loading export data so that export data processing // never has to create a types.Package for an indirect dependency, @@ -1068,6 +1150,13 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { return } + // Start shutting down if the context is done and do not type check. + // Packages that import this one will have ld.Context.Err() != nil. + // ld.Context.Err() will be returned later by refine. + if ld.Context.Err() != nil { + return + } + lpkg.TypesInfo = &types.Info{ Types: make(map[ast.Expr]types.TypeAndValue), Defs: make(map[*ast.Ident]types.Object), @@ -1245,11 +1334,6 @@ func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) { parsed := make([]*ast.File, n) errors := make([]error, n) for i, file := range filenames { - if ld.Config.Context.Err() != nil { - parsed[i] = nil - errors[i] = ld.Config.Context.Err() - continue - } wg.Add(1) go func(i int, filename string) { parsed[i], errors[i] = ld.parseFile(filename) diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index a2386c347a2..d648c3d071b 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -51,7 +51,7 @@ type Path string // // PO package->object Package.Scope.Lookup // OT object->type Object.Type -// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU] +// TT type->type Type.{Elem,Key,{,{,Recv}Type}Params,Results,Underlying} [EKPRUTrC] // TO type->object Type.{At,Field,Method,Obj} [AFMO] // // All valid paths start with a package and end at an object @@ -63,8 +63,8 @@ type Path string // - The only PO operator is Package.Scope.Lookup, which requires an identifier. // - The only OT operator is Object.Type, // which we encode as '.' because dot cannot appear in an identifier. -// - The TT operators are encoded as [EKPRUTC]; -// one of these (TypeParam) requires an integer operand, +// - The TT operators are encoded as [EKPRUTrC]; +// two of these ({,Recv}TypeParams) require an integer operand, // which is encoded as a string of decimal digits. // - The TO operators are encoded as [AFMO]; // three of these (At,Field,Method) require an integer operand, @@ -98,19 +98,20 @@ const ( opType = '.' 
// .Type() (Object) // type->type operators - opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map) - opKey = 'K' // .Key() (Map) - opParams = 'P' // .Params() (Signature) - opResults = 'R' // .Results() (Signature) - opUnderlying = 'U' // .Underlying() (Named) - opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature) - opConstraint = 'C' // .Constraint() (TypeParam) + opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map) + opKey = 'K' // .Key() (Map) + opParams = 'P' // .Params() (Signature) + opResults = 'R' // .Results() (Signature) + opUnderlying = 'U' // .Underlying() (Named) + opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature) + opRecvTypeParam = 'r' // .RecvTypeParams.At(i) (Signature) + opConstraint = 'C' // .Constraint() (TypeParam) // type->object operators - opAt = 'A' // .At(i) (Tuple) - opField = 'F' // .Field(i) (Struct) - opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored) - opObj = 'O' // .Obj() (Named, TypeParam) + opAt = 'A' // .At(i) (Tuple) + opField = 'F' // .Field(i) (Struct) + opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored) + opObj = 'O' // .Obj() (Named, TypeParam) ) // For is equivalent to new(Encoder).For(obj). @@ -286,7 +287,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { } } else { if named, _ := T.(*types.Named); named != nil { - if r := findTypeParam(obj, named.TypeParams(), path, nil); r != nil { + if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam, nil); r != nil { // generic named type return Path(r), nil } @@ -462,7 +463,10 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] } return find(obj, T.Elem(), append(path, opElem), seen) case *types.Signature: - if r := findTypeParam(obj, T.TypeParams(), path, seen); r != nil { + if r := findTypeParam(obj, T.RecvTypeParams(), path, opRecvTypeParam, nil); r != nil { + return r + } + if r := findTypeParam(obj, T.TypeParams(), path, opTypeParam, seen); r != nil { return r } if r := find(obj, T.Params(), append(path, opParams), seen); r != nil { @@ -525,10 +529,10 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] panic(T) } -func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte { +func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, op byte, seen map[*types.TypeName]bool) []byte { for i := 0; i < list.Len(); i++ { tparam := list.At(i) - path2 := appendOpArg(path, opTypeParam, i) + path2 := appendOpArg(path, op, i) if r := find(obj, tparam, path2, seen); r != nil { return r } @@ -580,10 +584,10 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { code := suffix[0] suffix = suffix[1:] - // Codes [AFM] have an integer operand. + // Codes [AFMTr] have an integer operand. 
var index int switch code { - case opAt, opField, opMethod, opTypeParam: + case opAt, opField, opMethod, opTypeParam, opRecvTypeParam: rest := strings.TrimLeft(suffix, "0123456789") numerals := suffix[:len(suffix)-len(rest)] suffix = rest @@ -664,6 +668,17 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { } t = tparams.At(index) + case opRecvTypeParam: + sig, ok := t.(*types.Signature) // Signature + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) + } + rtparams := sig.RecvTypeParams() + if n := rtparams.Len(); index >= n { + return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) + } + t = rtparams.At(index) + case opConstraint: tparam, ok := t.(*types.TypeParam) if !ok { @@ -725,6 +740,10 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { } } + if obj == nil { + panic(p) // path does not end in an object-valued operator + } + if obj.Pkg() != pkg { return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj) } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases.go b/vendor/golang.org/x/tools/internal/aliases/aliases.go index f89112c8ee5..c24c2eee457 100644 --- a/vendor/golang.org/x/tools/internal/aliases/aliases.go +++ b/vendor/golang.org/x/tools/internal/aliases/aliases.go @@ -16,10 +16,14 @@ import ( // NewAlias creates a new TypeName in Package pkg that // is an alias for the type rhs. // -// When GoVersion>=1.22 and GODEBUG=gotypesalias=1, -// the Type() of the return value is a *types.Alias. -func NewAlias(pos token.Pos, pkg *types.Package, name string, rhs types.Type) *types.TypeName { - if enabled() { +// The enabled parameter determines whether the resulting [TypeName]'s +// type is an [types.Alias]. Its value must be the result of a call to +// [Enabled], which computes the effective value of +// GODEBUG=gotypesalias=... by invoking the type checker. The Enabled +// function is expensive and should be called once per task (e.g. +// package import), not once per call to NewAlias. +func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type) *types.TypeName { + if enabled { tname := types.NewTypeName(pos, pkg, name, nil) newAlias(tname, rhs) return tname diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go index 1872b56ff8f..c027b9f315f 100644 --- a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go +++ b/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go @@ -15,16 +15,17 @@ import ( // It will never be created by go/types. type Alias struct{} -func (*Alias) String() string { panic("unreachable") } - +func (*Alias) String() string { panic("unreachable") } func (*Alias) Underlying() types.Type { panic("unreachable") } - -func (*Alias) Obj() *types.TypeName { panic("unreachable") } +func (*Alias) Obj() *types.TypeName { panic("unreachable") } +func Rhs(alias *Alias) types.Type { panic("unreachable") } // Unalias returns the type t for go <=1.21. func Unalias(t types.Type) types.Type { return t } -// Always false for go <=1.21. Ignores GODEBUG. -func enabled() bool { return false } - func newAlias(name *types.TypeName, rhs types.Type) *Alias { panic("unreachable") } + +// Enabled reports whether [NewAlias] should create [types.Alias] types. +// +// Before go1.22, this function always returns false. 
+func Enabled() bool { return false } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go index 8b92116284d..b3299548419 100644 --- a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go +++ b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go @@ -12,14 +12,22 @@ import ( "go/parser" "go/token" "go/types" - "os" - "strings" - "sync" ) // Alias is an alias of types.Alias. type Alias = types.Alias +// Rhs returns the type on the right-hand side of the alias declaration. +func Rhs(alias *Alias) types.Type { + if alias, ok := any(alias).(interface{ Rhs() types.Type }); ok { + return alias.Rhs() // go1.23+ + } + + // go1.22's Alias didn't have the Rhs method, + // so Unalias is the best we can do. + return Unalias(alias) +} + // Unalias is a wrapper of types.Unalias. func Unalias(t types.Type) types.Type { return types.Unalias(t) } @@ -33,40 +41,23 @@ func newAlias(tname *types.TypeName, rhs types.Type) *Alias { return a } -// enabled returns true when types.Aliases are enabled. -func enabled() bool { - // Use the gotypesalias value in GODEBUG if set. - godebug := os.Getenv("GODEBUG") - value := -1 // last set value. - for _, f := range strings.Split(godebug, ",") { - switch f { - case "gotypesalias=1": - value = 1 - case "gotypesalias=0": - value = 0 - } - } - switch value { - case 0: - return false - case 1: - return true - default: - return aliasesDefault() - } -} - -// aliasesDefault reports if aliases are enabled by default. -func aliasesDefault() bool { - // Dynamically check if Aliases will be produced from go/types. - aliasesDefaultOnce.Do(func() { - fset := token.NewFileSet() - f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", 0) - pkg, _ := new(types.Config).Check("p", fset, []*ast.File{f}, nil) - _, gotypesaliasDefault = pkg.Scope().Lookup("A").Type().(*types.Alias) - }) - return gotypesaliasDefault +// Enabled reports whether [NewAlias] should create [types.Alias] types. +// +// This function is expensive! Call it sparingly. +func Enabled() bool { + // The only reliable way to compute the answer is to invoke go/types. + // We don't parse the GODEBUG environment variable, because + // (a) it's tricky to do so in a manner that is consistent + // with the godebug package; in particular, a simple + // substring check is not good enough. The value is a + // rightmost-wins list of options. But more importantly: + // (b) it is impossible to detect changes to the effective + // setting caused by os.Setenv("GODEBUG"), as happens in + // many tests. Therefore any attempt to cache the result + // is just incorrect. + fset := token.NewFileSet() + f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", 0) + pkg, _ := new(types.Config).Check("p", fset, []*ast.File{f}, nil) + _, enabled := pkg.Scope().Lookup("A").Type().(*types.Alias) + return enabled } - -var gotypesaliasDefault bool -var aliasesDefaultOnce sync.Once diff --git a/vendor/golang.org/x/tools/internal/event/tag/tag.go b/vendor/golang.org/x/tools/internal/event/tag/tag.go deleted file mode 100644 index 581b26c2041..00000000000 --- a/vendor/golang.org/x/tools/internal/event/tag/tag.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package tag provides the labels used for telemetry throughout gopls. 
-package tag - -import ( - "golang.org/x/tools/internal/event/keys" -) - -var ( - // create the label keys we use - Method = keys.NewString("method", "") - StatusCode = keys.NewString("status.code", "") - StatusMessage = keys.NewString("status.message", "") - RPCID = keys.NewString("id", "") - RPCDirection = keys.NewString("direction", "") - File = keys.NewString("file", "") - Directory = keys.New("directory", "") - URI = keys.New("URI", "") - Package = keys.NewString("package", "") // sorted comma-separated list of Package IDs - PackagePath = keys.NewString("package_path", "") - Query = keys.New("query", "") - Snapshot = keys.NewUInt64("snapshot", "") - Operation = keys.NewString("operation", "") - - Position = keys.New("position", "") - Category = keys.NewString("category", "") - PackageCount = keys.NewInt("packages", "") - Files = keys.New("files", "") - Port = keys.NewInt("port", "") - Type = keys.New("type", "") - HoverKind = keys.NewString("hoverkind", "") - - NewServer = keys.NewString("new_server", "A new server was added") - EndServer = keys.NewString("end_server", "A server was shut down") - - ServerID = keys.NewString("server", "The server ID an event is related to") - Logfile = keys.NewString("logfile", "") - DebugAddress = keys.NewString("debug_address", "") - GoplsPath = keys.NewString("gopls_path", "") - ClientID = keys.NewString("client_id", "") - - Level = keys.NewInt("level", "The logging level") -) - -var ( - // create the stats we measure - Started = keys.NewInt64("started", "Count of started RPCs.") - ReceivedBytes = keys.NewInt64("received_bytes", "Bytes received.") //, unit.Bytes) - SentBytes = keys.NewInt64("sent_bytes", "Bytes sent.") //, unit.Bytes) - Latency = keys.NewFloat64("latency_ms", "Elapsed time in milliseconds") //, unit.Milliseconds) -) - -const ( - Inbound = "in" - Outbound = "out" -) diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index 683bd7395a6..deeb67f315a 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -21,7 +21,6 @@ import ( "sort" "strconv" "strings" - "unsafe" "golang.org/x/tools/go/types/objectpath" "golang.org/x/tools/internal/aliases" @@ -529,7 +528,7 @@ func (p *iexporter) doDecl(obj types.Object) { if alias, ok := t.(*aliases.Alias); ok { // Preserve materialized aliases, // even of non-exported types. - t = aliasRHS(alias) + t = aliases.Rhs(alias) } w.typ(t, obj.Pkg()) break @@ -1331,19 +1330,3 @@ func (e internalError) Error() string { return "gcimporter: " + string(e) } func internalErrorf(format string, args ...interface{}) error { return internalError(fmt.Sprintf(format, args...)) } - -// aliasRHS removes exactly one Alias constructor. -func aliasRHS(alias *aliases.Alias) types.Type { - // TODO(adonovan): if proposal #66559 is accepted, this will - // become Alias.RHS(alias). In the meantime, we must punch - // through the drywall. 
- type go123Alias struct { - _ *types.TypeName - _ *types.TypeParamList - RHS types.Type - _ types.Type - } - var raw *go123Alias - *(**aliases.Alias)(unsafe.Pointer(&raw)) = alias - return raw.RHS -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 2732121b5ef..136aa03653c 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -210,6 +210,7 @@ func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte p := iimporter{ version: int(version), ipath: path, + aliases: aliases.Enabled(), shallow: shallow, reportf: reportf, @@ -369,6 +370,7 @@ type iimporter struct { version int ipath string + aliases bool shallow bool reportf ReportFunc // if non-nil, used to report bugs @@ -567,7 +569,7 @@ func (r *importReader) obj(name string) { // tparams := r.tparamList() // alias.SetTypeParams(tparams) // } - r.declare(aliases.NewAlias(pos, r.currPkg, name, typ)) + r.declare(aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ)) case constTag: typ, val := r.value() diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index b3be452ae8a..2c077068877 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -26,6 +26,7 @@ type pkgReader struct { ctxt *types.Context imports map[string]*types.Package // previously imported packages, indexed by path + aliases bool // create types.Alias nodes // lazily initialized arrays corresponding to the unified IR // PosBase, Pkg, and Type sections, respectively. @@ -99,6 +100,7 @@ func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[st ctxt: ctxt, imports: imports, + aliases: aliases.Enabled(), posBases: make([]string, input.NumElems(pkgbits.RelocPosBase)), pkgs: make([]*types.Package, input.NumElems(pkgbits.RelocPkg)), @@ -524,7 +526,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { case pkgbits.ObjAlias: pos := r.pos() typ := r.typ() - declare(aliases.NewAlias(pos, objPkg, objName, typ)) + declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ)) case pkgbits.ObjConst: pos := r.pos() diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index f7de3c8283b..2e59ff8558c 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -8,12 +8,14 @@ package gocommand import ( "bytes" "context" + "encoding/json" "errors" "fmt" "io" "log" "os" "os/exec" + "path/filepath" "reflect" "regexp" "runtime" @@ -25,7 +27,6 @@ import ( "golang.org/x/tools/internal/event" "golang.org/x/tools/internal/event/keys" "golang.org/x/tools/internal/event/label" - "golang.org/x/tools/internal/event/tag" ) // An Runner will run go command invocations and serialize @@ -55,11 +56,14 @@ func (runner *Runner) initialize() { // 1.14: go: updating go.mod: existing contents have changed since last read var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`) -// verb is an event label for the go command verb. 
-var verb = keys.NewString("verb", "go command verb") +// event keys for go command invocations +var ( + verb = keys.NewString("verb", "go command verb") + directory = keys.NewString("directory", "") +) func invLabels(inv Invocation) []label.Label { - return []label.Label{verb.Of(inv.Verb), tag.Directory.Of(inv.WorkingDir)} + return []label.Label{verb.Of(inv.Verb), directory.Of(inv.WorkingDir)} } // Run is a convenience wrapper around RunRaw. @@ -165,7 +169,9 @@ type Invocation struct { // TODO(rfindley): remove, in favor of Args. ModFile string - // If Overlay is set, the go command is invoked with -overlay=Overlay. + // Overlay is the name of the JSON overlay file that describes + // unsaved editor buffers; see [WriteOverlays]. + // If set, the go command is invoked with -overlay=Overlay. // TODO(rfindley): remove, in favor of Args. Overlay string @@ -194,12 +200,14 @@ func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io return } -func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { - log := i.Logf - if log == nil { - log = func(string, ...interface{}) {} +// logf logs if i.Logf is non-nil. +func (i *Invocation) logf(format string, args ...any) { + if i.Logf != nil { + i.Logf(format, args...) } +} +func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { goArgs := []string{i.Verb} appendModFile := func() { @@ -253,12 +261,15 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { waitDelay.Set(reflect.ValueOf(30 * time.Second)) } - // On darwin the cwd gets resolved to the real path, which breaks anything that - // expects the working directory to keep the original path, including the + // The cwd gets resolved to the real path. On Darwin, where + // /tmp is a symlink, this breaks anything that expects the + // working directory to keep the original path, including the // go command when dealing with modules. - // The Go stdlib has a special feature where if the cwd and the PWD are the - // same node then it trusts the PWD, so by setting it in the env for the child - // process we fix up all the paths returned by the go command. + // + // os.Getwd has a special feature where if the cwd and the PWD + // are the same node then it trusts the PWD, so by setting it + // in the env for the child process we fix up all the paths + // returned by the go command. 
if !i.CleanEnv { cmd.Env = os.Environ() } @@ -268,7 +279,12 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { cmd.Dir = i.WorkingDir } - defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now()) + debugStr := cmdDebugStr(cmd) + i.logf("starting %v", debugStr) + start := time.Now() + defer func() { + i.logf("%s for %v", time.Since(start), debugStr) + }() return runCmdContext(ctx, cmd) } @@ -349,6 +365,7 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { } } + startTime := time.Now() err = cmd.Start() if stdoutW != nil { // The child process has inherited the pipe file, @@ -375,7 +392,7 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { case err := <-resChan: return err case <-timer.C: - HandleHangingGoCommand(cmd.Process) + HandleHangingGoCommand(startTime, cmd) case <-ctx.Done(): } } else { @@ -409,7 +426,7 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { return <-resChan } -func HandleHangingGoCommand(proc *os.Process) { +func HandleHangingGoCommand(start time.Time, cmd *exec.Cmd) { switch runtime.GOOS { case "linux", "darwin", "freebsd", "netbsd": fmt.Fprintln(os.Stderr, `DETECTED A HANGING GO COMMAND @@ -442,7 +459,7 @@ See golang/go#54461 for more details.`) panic(fmt.Sprintf("running %s: %v", listFiles, err)) } } - panic(fmt.Sprintf("detected hanging go command (pid %d): see golang/go#54461 for more details", proc.Pid)) + panic(fmt.Sprintf("detected hanging go command (golang/go#54461); waited %s\n\tcommand:%s\n\tpid:%d", time.Since(start), cmd, cmd.Process.Pid)) } func cmdDebugStr(cmd *exec.Cmd) string { @@ -466,3 +483,73 @@ func cmdDebugStr(cmd *exec.Cmd) string { } return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " ")) } + +// WriteOverlays writes each value in the overlay (see the Overlay +// field of go/packages.Config) to a temporary file and returns the name +// of a JSON file describing the mapping that is suitable for the "go +// list -overlay" flag. +// +// On success, the caller must call the cleanup function exactly once +// when the files are no longer needed. +func WriteOverlays(overlay map[string][]byte) (filename string, cleanup func(), err error) { + // Do nothing if there are no overlays in the config. + if len(overlay) == 0 { + return "", func() {}, nil + } + + dir, err := os.MkdirTemp("", "gocommand-*") + if err != nil { + return "", nil, err + } + + // The caller must clean up this directory, + // unless this function returns an error. + // (The cleanup operand of each return + // statement below is ignored.) + defer func() { + cleanup = func() { + os.RemoveAll(dir) + } + if err != nil { + cleanup() + cleanup = nil + } + }() + + // Write each map entry to a temporary file. + overlays := make(map[string]string) + for k, v := range overlay { + // Use a unique basename for each file (001-foo.go), + // to avoid creating nested directories. + base := fmt.Sprintf("%d-%s", 1+len(overlays), filepath.Base(k)) + filename := filepath.Join(dir, base) + err := os.WriteFile(filename, v, 0666) + if err != nil { + return "", nil, err + } + overlays[k] = filename + } + + // Write the JSON overlay file that maps logical file names to temp files. + // + // OverlayJSON is the format overlay files are expected to be in. 
+ // The Replace map maps from overlaid paths to replacement paths: + // the Go command will forward all reads trying to open + // each overlaid path to its replacement path, or consider the overlaid + // path not to exist if the replacement path is empty. + // + // From golang/go#39958. + type OverlayJSON struct { + Replace map[string]string `json:"replace,omitempty"` + } + b, err := json.Marshal(OverlayJSON{Replace: overlays}) + if err != nil { + return "", nil, err + } + filename = filepath.Join(dir, "overlay.json") + if err := os.WriteFile(filename, b, 0666); err != nil { + return "", nil, err + } + + return filename, nil, nil +} diff --git a/vendor/golang.org/x/tools/internal/gocommand/vendor.go b/vendor/golang.org/x/tools/internal/gocommand/vendor.go index 2d3d408c0be..e38d1fb4888 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/vendor.go +++ b/vendor/golang.org/x/tools/internal/gocommand/vendor.go @@ -107,3 +107,57 @@ func getMainModuleAnd114(ctx context.Context, inv Invocation, r *Runner) (*Modul } return mod, lines[4] == "go1.14", nil } + +// WorkspaceVendorEnabled reports whether workspace vendoring is enabled. It takes a *Runner to execute Go commands +// with the supplied context.Context and Invocation. The Invocation can contain pre-defined fields, +// of which only Verb and Args are modified to run the appropriate Go command. +// Inspired by setDefaultBuildMod in modload/init.go +func WorkspaceVendorEnabled(ctx context.Context, inv Invocation, r *Runner) (bool, []*ModuleJSON, error) { + inv.Verb = "env" + inv.Args = []string{"GOWORK"} + stdout, err := r.Run(ctx, inv) + if err != nil { + return false, nil, err + } + goWork := string(bytes.TrimSpace(stdout.Bytes())) + if fi, err := os.Stat(filepath.Join(filepath.Dir(goWork), "vendor")); err == nil && fi.IsDir() { + mainMods, err := getWorkspaceMainModules(ctx, inv, r) + if err != nil { + return false, nil, err + } + return true, mainMods, nil + } + return false, nil, nil +} + +// getWorkspaceMainModules gets the main modules' information. +// This is the information needed to figure out if vendoring should be enabled. +func getWorkspaceMainModules(ctx context.Context, inv Invocation, r *Runner) ([]*ModuleJSON, error) { + const format = `{{.Path}} +{{.Dir}} +{{.GoMod}} +{{.GoVersion}} +` + inv.Verb = "list" + inv.Args = []string{"-m", "-f", format} + stdout, err := r.Run(ctx, inv) + if err != nil { + return nil, err + } + + lines := strings.Split(strings.TrimSuffix(stdout.String(), "\n"), "\n") + if len(lines) < 4 { + return nil, fmt.Errorf("unexpected stdout: %q", stdout.String()) + } + mods := make([]*ModuleJSON, 0, len(lines)/4) + for i := 0; i < len(lines); i += 4 { + mods = append(mods, &ModuleJSON{ + Path: lines[i], + Dir: lines[i+1], + GoMod: lines[i+2], + GoVersion: lines[i+3], + Main: true, + }) + } + return mods, nil +} diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go index 55980327616..dc7d50a7a40 100644 --- a/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -27,6 +27,7 @@ import ( "unicode" "unicode/utf8" + "golang.org/x/sync/errgroup" "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/internal/event" "golang.org/x/tools/internal/gocommand" @@ -104,7 +105,10 @@ type packageInfo struct { // parseOtherFiles parses all the Go files in srcDir except filename, including // test files if filename looks like a test. 
-func parseOtherFiles(fset *token.FileSet, srcDir, filename string) []*ast.File { +// +// It returns an error only if ctx is cancelled. Files with parse errors are +// ignored. +func parseOtherFiles(ctx context.Context, fset *token.FileSet, srcDir, filename string) ([]*ast.File, error) { // This could use go/packages but it doesn't buy much, and it fails // with https://golang.org/issue/26296 in LoadFiles mode in some cases. considerTests := strings.HasSuffix(filename, "_test.go") @@ -112,11 +116,14 @@ func parseOtherFiles(fset *token.FileSet, srcDir, filename string) []*ast.File { fileBase := filepath.Base(filename) packageFileInfos, err := os.ReadDir(srcDir) if err != nil { - return nil + return nil, ctx.Err() } var files []*ast.File for _, fi := range packageFileInfos { + if ctx.Err() != nil { + return nil, ctx.Err() + } if fi.Name() == fileBase || !strings.HasSuffix(fi.Name(), ".go") { continue } @@ -132,7 +139,7 @@ func parseOtherFiles(fset *token.FileSet, srcDir, filename string) []*ast.File { files = append(files, f) } - return files + return files, ctx.Err() } // addGlobals puts the names of package vars into the provided map. @@ -301,6 +308,20 @@ func (p *pass) loadPackageNames(imports []*ImportInfo) error { return nil } +// if there is a trailing major version, remove it +func withoutVersion(nm string) string { + if v := path.Base(nm); len(v) > 0 && v[0] == 'v' { + if _, err := strconv.Atoi(v[1:]); err == nil { + // this is, for instance, called with rand/v2 and returns rand + if len(v) < len(nm) { + xnm := nm[:len(nm)-len(v)-1] + return path.Base(xnm) + } + } + } + return nm +} + // importIdentifier returns the identifier that imp will introduce. It will // guess if the package name has not been loaded, e.g. because the source // is not available. @@ -310,7 +331,7 @@ func (p *pass) importIdentifier(imp *ImportInfo) string { } known := p.knownPackages[imp.ImportPath] if known != nil && known.name != "" { - return known.name + return withoutVersion(known.name) } return ImportPathToAssumedName(imp.ImportPath) } @@ -345,9 +366,7 @@ func (p *pass) load() ([]*ImportFix, bool) { if p.loadRealPackageNames { err := p.loadPackageNames(append(imports, p.candidates...)) if err != nil { - if p.env.Logf != nil { - p.env.Logf("loading package names: %v", err) - } + p.env.logf("loading package names: %v", err) return nil, false } } @@ -546,6 +565,8 @@ func (p *pass) addCandidate(imp *ImportInfo, pkg *packageInfo) { // // This is declared as a variable rather than a function so goimports can // easily be extended by adding a file with an init function. +// +// DO NOT REMOVE: used internally at Google. 
var fixImports = fixImportsDefault func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) error { @@ -565,9 +586,7 @@ func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename st return nil, err } srcDir := filepath.Dir(abs) - if env.Logf != nil { - env.Logf("fixImports(filename=%q), abs=%q, srcDir=%q ...", filename, abs, srcDir) - } + env.logf("fixImports(filename=%q), abs=%q, srcDir=%q ...", filename, abs, srcDir) // First pass: looking only at f, and using the naive algorithm to // derive package names from import paths, see if the file is already @@ -578,7 +597,10 @@ func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename st return fixes, nil } - otherFiles := parseOtherFiles(fset, srcDir, filename) + otherFiles, err := parseOtherFiles(ctx, fset, srcDir, filename) + if err != nil { + return nil, err + } // Second pass: add information from other files in the same package, // like their package vars and imports. @@ -996,16 +1018,26 @@ func (e *ProcessEnv) GetResolver() (Resolver, error) { // already know the view type. if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 { e.resolver = newGopathResolver(e) + e.logf("created gopath resolver") } else if r, err := newModuleResolver(e, e.ModCache); err != nil { e.resolverErr = err + e.logf("failed to create module resolver: %v", err) } else { e.resolver = Resolver(r) + e.logf("created module resolver") } } return e.resolver, e.resolverErr } +// logf logs if e.Logf is non-nil. +func (e *ProcessEnv) logf(format string, args ...any) { + if e.Logf != nil { + e.Logf(format, args...) + } +} + // buildContext returns the build.Context to use for matching files. // // TODO(rfindley): support dynamic GOOS, GOARCH here, when doing cross-platform @@ -1059,6 +1091,18 @@ func addStdlibCandidates(pass *pass, refs references) error { if err != nil { return err } + localbase := func(nm string) string { + ans := path.Base(nm) + if ans[0] == 'v' { + // this is called, for instance, with math/rand/v2 and returns rand/v2 + if _, err := strconv.Atoi(ans[1:]); err == nil { + ix := strings.LastIndex(nm, ans) + more := path.Base(nm[:ix]) + ans = path.Join(more, ans) + } + } + return ans + } add := func(pkg string) { // Prevent self-imports. if path.Base(pkg) == pass.f.Name.Name && filepath.Join(goenv["GOROOT"], "src", pkg) == pass.srcDir { @@ -1067,13 +1111,17 @@ func addStdlibCandidates(pass *pass, refs references) error { exports := symbolNameSet(stdlib.PackageSymbols[pkg]) pass.addCandidate( &ImportInfo{ImportPath: pkg}, - &packageInfo{name: path.Base(pkg), exports: exports}) + &packageInfo{name: localbase(pkg), exports: exports}) } for left := range refs { if left == "rand" { - // Make sure we try crypto/rand before math/rand. + // Make sure we try crypto/rand before any version of math/rand as both have Int() + // and our policy is to recommend crypto add("crypto/rand") - add("math/rand") + // if the user's no later than go1.21, this should be "math/rand" + // but we have no way of figuring out what the user is using + // TODO: investigate using the toolchain version to disambiguate in the stdlib + add("math/rand/v2") continue } for importPath := range stdlib.PackageSymbols { @@ -1093,8 +1141,8 @@ type Resolver interface { // scan works with callback to search for packages. See scanCallback for details. scan(ctx context.Context, callback *scanCallback) error - // loadExports returns the set of exported symbols in the package at dir. - // loadExports may be called concurrently. 
+ // loadExports returns the package name and set of exported symbols in the + // package at dir. loadExports may be called concurrently. loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error) // scoreImportPath returns the relevance for an import path. @@ -1162,7 +1210,7 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs references, fil if err != nil { return err } - if err = resolver.scan(context.Background(), callback); err != nil { + if err = resolver.scan(ctx, callback); err != nil { return err } @@ -1171,54 +1219,52 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs references, fil imp *ImportInfo pkg *packageInfo } - results := make(chan result, len(refs)) + results := make([]*result, len(refs)) - ctx, cancel := context.WithCancel(context.TODO()) - var wg sync.WaitGroup - defer func() { - cancel() - wg.Wait() - }() - var ( - firstErr error - firstErrOnce sync.Once - ) - for pkgName, symbols := range refs { - wg.Add(1) - go func(pkgName string, symbols map[string]bool) { - defer wg.Done() + g, ctx := errgroup.WithContext(ctx) - found, err := findImport(ctx, pass, found[pkgName], pkgName, symbols) + searcher := symbolSearcher{ + logf: pass.env.logf, + srcDir: pass.srcDir, + xtest: strings.HasSuffix(pass.f.Name.Name, "_test"), + loadExports: resolver.loadExports, + } + i := 0 + for pkgName, symbols := range refs { + index := i // claim an index in results + i++ + pkgName := pkgName + symbols := symbols + + g.Go(func() error { + found, err := searcher.search(ctx, found[pkgName], pkgName, symbols) if err != nil { - firstErrOnce.Do(func() { - firstErr = err - cancel() - }) - return + return err } - if found == nil { - return // No matching package. + return nil // No matching package. } imp := &ImportInfo{ ImportPath: found.importPathShort, } - pkg := &packageInfo{ name: pkgName, exports: symbols, } - results <- result{imp, pkg} - }(pkgName, symbols) + results[index] = &result{imp, pkg} + return nil + }) + } + if err := g.Wait(); err != nil { + return err } - go func() { - wg.Wait() - close(results) - }() - for result := range results { + for _, result := range results { + if result == nil { + continue + } // Don't offer completions that would shadow predeclared // names, such as github.com/coreos/etcd/error. if types.Universe.Lookup(result.pkg.name) != nil { // predeclared @@ -1232,7 +1278,7 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs references, fil } pass.addCandidate(result.imp, result.pkg) } - return firstErr + return nil } // notIdentifier reports whether ch is an invalid identifier character. @@ -1576,9 +1622,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl fullFile := filepath.Join(dir, fi.Name()) f, err := parser.ParseFile(fset, fullFile, nil, 0) if err != nil { - if env.Logf != nil { - env.Logf("error parsing %v: %v", fullFile, err) - } + env.logf("error parsing %v: %v", fullFile, err) continue } if f.Name.Name == "documentation" { @@ -1614,9 +1658,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl } sortSymbols(exports) - if env.Logf != nil { - env.Logf("loaded exports in dir %v (package %v): %v", dir, pkgName, exports) - } + env.logf("loaded exports in dir %v (package %v): %v", dir, pkgName, exports) return pkgName, exports, nil } @@ -1626,25 +1668,39 @@ func sortSymbols(syms []stdlib.Symbol) { }) } -// findImport searches for a package with the given symbols. 
-// If no package is found, findImport returns ("", false, nil) -func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool) (*pkg, error) { +// A symbolSearcher searches for a package with a set of symbols, among a set +// of candidates. See [symbolSearcher.search]. +// +// The search occurs within the scope of a single file, with context captured +// in srcDir and xtest. +type symbolSearcher struct { + logf func(string, ...any) + srcDir string // directory containing the file + xtest bool // if set, the file containing is an x_test file + loadExports func(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error) +} + +// search searches the provided candidates for a package containing all +// exported symbols. +// +// If successful, returns the resulting package. +func (s *symbolSearcher) search(ctx context.Context, candidates []pkgDistance, pkgName string, symbols map[string]bool) (*pkg, error) { // Sort the candidates by their import package length, // assuming that shorter package names are better than long // ones. Note that this sorts by the de-vendored name, so // there's no "penalty" for vendoring. sort.Sort(byDistanceOrImportPathShortLength(candidates)) - if pass.env.Logf != nil { + if s.logf != nil { for i, c := range candidates { - pass.env.Logf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir) + s.logf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir) } } - resolver, err := pass.env.GetResolver() - if err != nil { - return nil, err - } - // Collect exports for packages with matching names. + // Arrange rescv so that we can we can await results in order of relevance + // and exit as soon as we find the first match. + // + // Search with bounded concurrency, returning as soon as the first result + // among rescv is non-nil. rescv := make([]chan *pkg, len(candidates)) for i := range candidates { rescv[i] = make(chan *pkg, 1) @@ -1652,6 +1708,7 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa const maxConcurrentPackageImport = 4 loadExportsSem := make(chan struct{}, maxConcurrentPackageImport) + // Ensure that all work is completed at exit. ctx, cancel := context.WithCancel(ctx) var wg sync.WaitGroup defer func() { @@ -1659,6 +1716,7 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa wg.Wait() }() + // Start the search. wg.Add(1) go func() { defer wg.Done() @@ -1669,55 +1727,67 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa return } + i := i + c := c wg.Add(1) - go func(c pkgDistance, resc chan<- *pkg) { + go func() { defer func() { <-loadExportsSem wg.Done() }() - - if pass.env.Logf != nil { - pass.env.Logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName) + if s.logf != nil { + s.logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName) } - // If we're an x_test, load the package under test's test variant. 
- includeTest := strings.HasSuffix(pass.f.Name.Name, "_test") && c.pkg.dir == pass.srcDir - _, exports, err := resolver.loadExports(ctx, c.pkg, includeTest) + pkg, err := s.searchOne(ctx, c, symbols) if err != nil { - if pass.env.Logf != nil { - pass.env.Logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err) - } - resc <- nil - return - } - - exportsMap := make(map[string]bool, len(exports)) - for _, sym := range exports { - exportsMap[sym.Name] = true - } - - // If it doesn't have the right - // symbols, send nil to mean no match. - for symbol := range symbols { - if !exportsMap[symbol] { - resc <- nil - return + if s.logf != nil && ctx.Err() == nil { + s.logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err) } + pkg = nil } - resc <- c.pkg - }(c, rescv[i]) + rescv[i] <- pkg // may be nil + }() } }() + // Await the first (best) result. for _, resc := range rescv { - pkg := <-resc - if pkg == nil { - continue + select { + case r := <-resc: + if r != nil { + return r, nil + } + case <-ctx.Done(): + return nil, ctx.Err() } - return pkg, nil } return nil, nil } +func (s *symbolSearcher) searchOne(ctx context.Context, c pkgDistance, symbols map[string]bool) (*pkg, error) { + if ctx.Err() != nil { + return nil, ctx.Err() + } + // If we're considering the package under test from an x_test, load the + // test variant. + includeTest := s.xtest && c.pkg.dir == s.srcDir + _, exports, err := s.loadExports(ctx, c.pkg, includeTest) + if err != nil { + return nil, err + } + + exportsMap := make(map[string]bool, len(exports)) + for _, sym := range exports { + exportsMap[sym.Name] = true + } + for symbol := range symbols { + if !exportsMap[symbol] { + return nil, nil // no match + } + } + return c.pkg, nil +} + // pkgIsCandidate reports whether pkg is a candidate for satisfying the // finding which package pkgIdent in the file named by filename is trying // to refer to. @@ -1737,58 +1807,24 @@ func pkgIsCandidate(filename string, refs references, pkg *pkg) bool { } // Speed optimization to minimize disk I/O: - // the last two components on disk must contain the - // package name somewhere. // - // This permits mismatch naming like directory - // "go-foo" being package "foo", or "pkg.v3" being "pkg", - // or directory "google.golang.org/api/cloudbilling/v1" - // being package "cloudbilling", but doesn't - // permit a directory "foo" to be package - // "bar", which is strongly discouraged - // anyway. There's no reason goimports needs - // to be slow just to accommodate that. + // Use the matchesPath heuristic to filter to package paths that could + // reasonably match a dangling reference. + // + // This permits mismatch naming like directory "go-foo" being package "foo", + // or "pkg.v3" being "pkg", or directory + // "google.golang.org/api/cloudbilling/v1" being package "cloudbilling", but + // doesn't permit a directory "foo" to be package "bar", which is strongly + // discouraged anyway. There's no reason goimports needs to be slow just to + // accommodate that. 
for pkgIdent := range refs { - lastTwo := lastTwoComponents(pkg.importPathShort) - if strings.Contains(lastTwo, pkgIdent) { + if matchesPath(pkgIdent, pkg.importPathShort) { return true } - if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(pkgIdent) { - lastTwo = lowerASCIIAndRemoveHyphen(lastTwo) - if strings.Contains(lastTwo, pkgIdent) { - return true - } - } } return false } -func hasHyphenOrUpperASCII(s string) bool { - for i := 0; i < len(s); i++ { - b := s[i] - if b == '-' || ('A' <= b && b <= 'Z') { - return true - } - } - return false -} - -func lowerASCIIAndRemoveHyphen(s string) (ret string) { - buf := make([]byte, 0, len(s)) - for i := 0; i < len(s); i++ { - b := s[i] - switch { - case b == '-': - continue - case 'A' <= b && b <= 'Z': - buf = append(buf, b+('a'-'A')) - default: - buf = append(buf, b) - } - } - return string(buf) -} - // canUse reports whether the package in dir is usable from filename, // respecting the Go "internal" and "vendor" visibility rules. func canUse(filename, dir string) bool { @@ -1829,19 +1865,84 @@ func canUse(filename, dir string) bool { return !strings.Contains(relSlash, "/vendor/") && !strings.Contains(relSlash, "/internal/") && !strings.HasSuffix(relSlash, "/internal") } -// lastTwoComponents returns at most the last two path components -// of v, using either / or \ as the path separator. -func lastTwoComponents(v string) string { +// matchesPath reports whether ident may match a potential package name +// referred to by path, using heuristics to filter out unidiomatic package +// names. +// +// Specifically, it checks whether either of the last two '/'- or '\'-delimited +// path segments matches the identifier. The segment-matching heuristic must +// allow for various conventions around segment naming, including go-foo, +// foo-go, and foo.v3. To handle all of these, matching considers both (1) the +// entire segment, ignoring '-' and '.', as well as (2) the last subsegment +// separated by '-' or '.'. So the segment foo-go matches all of the following +// identifiers: foo, go, and foogo. All matches are case insensitive (for ASCII +// identifiers). +// +// See the docstring for [pkgIsCandidate] for an explanation of how this +// heuristic filters potential candidate packages. +func matchesPath(ident, path string) bool { + // Ignore case, for ASCII. + lowerIfASCII := func(b byte) byte { + if 'A' <= b && b <= 'Z' { + return b + ('a' - 'A') + } + return b + } + + // match reports whether path[start:end] matches ident, ignoring [.-]. + match := func(start, end int) bool { + ii := len(ident) - 1 // current byte in ident + pi := end - 1 // current byte in path + for ; pi >= start && ii >= 0; pi-- { + pb := path[pi] + if pb == '-' || pb == '.' { + continue + } + pb = lowerIfASCII(pb) + ib := lowerIfASCII(ident[ii]) + if pb != ib { + return false + } + ii-- + } + return ii < 0 && pi < start // all bytes matched + } + + // segmentEnd and subsegmentEnd hold the end points of the current segment + // and subsegment intervals. + segmentEnd := len(path) + subsegmentEnd := len(path) + + // Count slashes; we only care about the last two segments. nslash := 0 - for i := len(v) - 1; i >= 0; i-- { - if v[i] == '/' || v[i] == '\\' { + + for i := len(path) - 1; i >= 0; i-- { + switch b := path[i]; b { + // TODO(rfindley): we handle backlashes here only because the previous + // heuristic handled backslashes. This is perhaps overly defensive, but is + // the result of many lessons regarding Chesterton's fence and the + // goimports codebase. 
+ // + // However, this function is only ever called with something called an + // 'importPath'. Is it possible that this is a real import path, and + // therefore we need only consider forward slashes? + case '/', '\\': + if match(i+1, segmentEnd) || match(i+1, subsegmentEnd) { + return true + } nslash++ if nslash == 2 { - return v[i:] + return false // did not match above + } + segmentEnd, subsegmentEnd = i, i // reset + case '-', '.': + if match(i+1, subsegmentEnd) { + return true } + subsegmentEnd = i } } - return v + return match(0, segmentEnd) || match(0, subsegmentEnd) } type visitFn func(node ast.Node) ast.Visitor diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go index 21ef938978e..91221fda322 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod.go +++ b/vendor/golang.org/x/tools/internal/imports/mod.go @@ -112,11 +112,11 @@ func newModuleResolver(e *ProcessEnv, moduleCacheCache *DirInfoCache) (*ModuleRe } vendorEnabled := false - var mainModVendor *gocommand.ModuleJSON + var mainModVendor *gocommand.ModuleJSON // for module vendoring + var mainModsVendor []*gocommand.ModuleJSON // for workspace vendoring - // Module vendor directories are ignored in workspace mode: - // https://go.googlesource.com/proposal/+/master/design/45713-workspace.md - if len(r.env.Env["GOWORK"]) == 0 { + goWork := r.env.Env["GOWORK"] + if len(goWork) == 0 { // TODO(rfindley): VendorEnabled runs the go command to get GOFLAGS, but // they should be available from the ProcessEnv. Can we avoid the redundant // invocation? @@ -124,18 +124,35 @@ func newModuleResolver(e *ProcessEnv, moduleCacheCache *DirInfoCache) (*ModuleRe if err != nil { return nil, err } + } else { + vendorEnabled, mainModsVendor, err = gocommand.WorkspaceVendorEnabled(context.Background(), inv, r.env.GocmdRunner) + if err != nil { + return nil, err + } } - if mainModVendor != nil && vendorEnabled { - // Vendor mode is on, so all the non-Main modules are irrelevant, - // and we need to search /vendor for everything. - r.mains = []*gocommand.ModuleJSON{mainModVendor} - r.dummyVendorMod = &gocommand.ModuleJSON{ - Path: "", - Dir: filepath.Join(mainModVendor.Dir, "vendor"), + if vendorEnabled { + if mainModVendor != nil { + // Module vendor mode is on, so all the non-Main modules are irrelevant, + // and we need to search /vendor for everything. + r.mains = []*gocommand.ModuleJSON{mainModVendor} + r.dummyVendorMod = &gocommand.ModuleJSON{ + Path: "", + Dir: filepath.Join(mainModVendor.Dir, "vendor"), + } + r.modsByModPath = []*gocommand.ModuleJSON{mainModVendor, r.dummyVendorMod} + r.modsByDir = []*gocommand.ModuleJSON{mainModVendor, r.dummyVendorMod} + } else { + // Workspace vendor mode is on, so all the non-Main modules are irrelevant, + // and we need to search /vendor for everything. + r.mains = mainModsVendor + r.dummyVendorMod = &gocommand.ModuleJSON{ + Path: "", + Dir: filepath.Join(filepath.Dir(goWork), "vendor"), + } + r.modsByModPath = append(append([]*gocommand.ModuleJSON{}, mainModsVendor...), r.dummyVendorMod) + r.modsByDir = append(append([]*gocommand.ModuleJSON{}, mainModsVendor...), r.dummyVendorMod) } - r.modsByModPath = []*gocommand.ModuleJSON{mainModVendor, r.dummyVendorMod} - r.modsByDir = []*gocommand.ModuleJSON{mainModVendor, r.dummyVendorMod} } else { // Vendor mode is off, so run go list -m ... to find everything. 
err := r.initAllMods() @@ -166,8 +183,9 @@ func newModuleResolver(e *ProcessEnv, moduleCacheCache *DirInfoCache) (*ModuleRe return count(j) < count(i) // descending order }) - r.roots = []gopathwalk.Root{ - {Path: filepath.Join(goenv["GOROOT"], "/src"), Type: gopathwalk.RootGOROOT}, + r.roots = []gopathwalk.Root{} + if goenv["GOROOT"] != "" { // "" happens in tests + r.roots = append(r.roots, gopathwalk.Root{Path: filepath.Join(goenv["GOROOT"], "/src"), Type: gopathwalk.RootGOROOT}) } r.mainByDir = make(map[string]*gocommand.ModuleJSON) for _, main := range r.mains { @@ -247,9 +265,7 @@ func (r *ModuleResolver) initAllMods() error { return err } if mod.Dir == "" { - if r.env.Logf != nil { - r.env.Logf("module %v has not been downloaded and will be ignored", mod.Path) - } + r.env.logf("module %v has not been downloaded and will be ignored", mod.Path) // Can't do anything with a module that's not downloaded. continue } @@ -748,9 +764,7 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir } modPath, err := module.UnescapePath(filepath.ToSlash(matches[1])) if err != nil { - if r.env.Logf != nil { - r.env.Logf("decoding module cache path %q: %v", subdir, err) - } + r.env.logf("decoding module cache path %q: %v", subdir, err) return directoryPackageInfo{ status: directoryScanned, err: fmt.Errorf("decoding module cache path %q: %v", subdir, err), diff --git a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go index b92e8e6eb32..2acd85851e3 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go @@ -23,6 +23,9 @@ type PkgDecoder struct { // version is the file format version. version uint32 + // aliases determines whether types.Aliases should be created + aliases bool + // sync indicates whether the file uses sync markers. 
sync bool @@ -73,6 +76,7 @@ func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync } func NewPkgDecoder(pkgPath, input string) PkgDecoder { pr := PkgDecoder{ pkgPath: pkgPath, + //aliases: aliases.Enabled(), } // TODO(mdempsky): Implement direct indexing of input string to diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go index fd6892075ee..a928acf29fa 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go +++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go @@ -23,6 +23,7 @@ var PackageSymbols = map[string][]Symbol{ {"ErrWriteAfterClose", Var, 0}, {"ErrWriteTooLong", Var, 0}, {"FileInfoHeader", Func, 1}, + {"FileInfoNames", Type, 23}, {"Format", Type, 10}, {"FormatGNU", Const, 10}, {"FormatPAX", Const, 10}, @@ -820,6 +821,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*ConnectionState).ExportKeyingMaterial", Method, 11}, {"(*Dialer).Dial", Method, 15}, {"(*Dialer).DialContext", Method, 15}, + {"(*ECHRejectionError).Error", Method, 23}, {"(*QUICConn).Close", Method, 21}, {"(*QUICConn).ConnectionState", Method, 21}, {"(*QUICConn).HandleData", Method, 21}, @@ -827,6 +829,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*QUICConn).SendSessionTicket", Method, 21}, {"(*QUICConn).SetTransportParameters", Method, 21}, {"(*QUICConn).Start", Method, 21}, + {"(*QUICConn).StoreSession", Method, 23}, {"(*SessionState).Bytes", Method, 21}, {"(AlertError).Error", Method, 21}, {"(ClientAuthType).String", Method, 15}, @@ -877,6 +880,8 @@ var PackageSymbols = map[string][]Symbol{ {"Config.ClientSessionCache", Field, 3}, {"Config.CurvePreferences", Field, 3}, {"Config.DynamicRecordSizingDisabled", Field, 7}, + {"Config.EncryptedClientHelloConfigList", Field, 23}, + {"Config.EncryptedClientHelloRejectionVerify", Field, 23}, {"Config.GetCertificate", Field, 4}, {"Config.GetClientCertificate", Field, 8}, {"Config.GetConfigForClient", Field, 8}, @@ -902,6 +907,7 @@ var PackageSymbols = map[string][]Symbol{ {"ConnectionState", Type, 0}, {"ConnectionState.CipherSuite", Field, 0}, {"ConnectionState.DidResume", Field, 1}, + {"ConnectionState.ECHAccepted", Field, 23}, {"ConnectionState.HandshakeComplete", Field, 0}, {"ConnectionState.NegotiatedProtocol", Field, 0}, {"ConnectionState.NegotiatedProtocolIsMutual", Field, 0}, @@ -925,6 +931,8 @@ var PackageSymbols = map[string][]Symbol{ {"ECDSAWithP384AndSHA384", Const, 8}, {"ECDSAWithP521AndSHA512", Const, 8}, {"ECDSAWithSHA1", Const, 10}, + {"ECHRejectionError", Type, 23}, + {"ECHRejectionError.RetryConfigList", Field, 23}, {"Ed25519", Const, 13}, {"InsecureCipherSuites", Func, 14}, {"Listen", Func, 0}, @@ -943,6 +951,7 @@ var PackageSymbols = map[string][]Symbol{ {"ParseSessionState", Func, 21}, {"QUICClient", Func, 21}, {"QUICConfig", Type, 21}, + {"QUICConfig.EnableStoreSessionEvent", Field, 23}, {"QUICConfig.TLSConfig", Field, 21}, {"QUICConn", Type, 21}, {"QUICEncryptionLevel", Type, 21}, @@ -954,16 +963,20 @@ var PackageSymbols = map[string][]Symbol{ {"QUICEvent.Data", Field, 21}, {"QUICEvent.Kind", Field, 21}, {"QUICEvent.Level", Field, 21}, + {"QUICEvent.SessionState", Field, 23}, {"QUICEvent.Suite", Field, 21}, {"QUICEventKind", Type, 21}, {"QUICHandshakeDone", Const, 21}, {"QUICNoEvent", Const, 21}, {"QUICRejectedEarlyData", Const, 21}, + {"QUICResumeSession", Const, 23}, {"QUICServer", Func, 21}, {"QUICSessionTicketOptions", Type, 21}, {"QUICSessionTicketOptions.EarlyData", Field, 21}, + {"QUICSessionTicketOptions.Extra", Field, 23}, {"QUICSetReadSecret", 
Const, 21}, {"QUICSetWriteSecret", Const, 21}, + {"QUICStoreSession", Const, 23}, {"QUICTransportParameters", Const, 21}, {"QUICTransportParametersRequired", Const, 21}, {"QUICWriteData", Const, 21}, @@ -1036,6 +1049,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*Certificate).Verify", Method, 0}, {"(*Certificate).VerifyHostname", Method, 0}, {"(*CertificateRequest).CheckSignature", Method, 5}, + {"(*OID).UnmarshalBinary", Method, 23}, + {"(*OID).UnmarshalText", Method, 23}, {"(*RevocationList).CheckSignatureFrom", Method, 19}, {"(CertificateInvalidError).Error", Method, 0}, {"(ConstraintViolationError).Error", Method, 0}, @@ -1043,6 +1058,8 @@ var PackageSymbols = map[string][]Symbol{ {"(InsecureAlgorithmError).Error", Method, 6}, {"(OID).Equal", Method, 22}, {"(OID).EqualASN1OID", Method, 22}, + {"(OID).MarshalBinary", Method, 23}, + {"(OID).MarshalText", Method, 23}, {"(OID).String", Method, 22}, {"(PublicKeyAlgorithm).String", Method, 10}, {"(SignatureAlgorithm).String", Method, 6}, @@ -1196,6 +1213,7 @@ var PackageSymbols = map[string][]Symbol{ {"ParseCertificates", Func, 0}, {"ParseDERCRL", Func, 0}, {"ParseECPrivateKey", Func, 1}, + {"ParseOID", Func, 23}, {"ParsePKCS1PrivateKey", Func, 0}, {"ParsePKCS1PublicKey", Func, 10}, {"ParsePKCS8PrivateKey", Func, 0}, @@ -2541,6 +2559,7 @@ var PackageSymbols = map[string][]Symbol{ {"PT_NOTE", Const, 0}, {"PT_NULL", Const, 0}, {"PT_OPENBSD_BOOTDATA", Const, 16}, + {"PT_OPENBSD_NOBTCFI", Const, 23}, {"PT_OPENBSD_RANDOMIZE", Const, 16}, {"PT_OPENBSD_WXNEEDED", Const, 16}, {"PT_PAX_FLAGS", Const, 16}, @@ -3620,13 +3639,16 @@ var PackageSymbols = map[string][]Symbol{ {"STT_COMMON", Const, 0}, {"STT_FILE", Const, 0}, {"STT_FUNC", Const, 0}, + {"STT_GNU_IFUNC", Const, 23}, {"STT_HIOS", Const, 0}, {"STT_HIPROC", Const, 0}, {"STT_LOOS", Const, 0}, {"STT_LOPROC", Const, 0}, {"STT_NOTYPE", Const, 0}, {"STT_OBJECT", Const, 0}, + {"STT_RELC", Const, 23}, {"STT_SECTION", Const, 0}, + {"STT_SRELC", Const, 23}, {"STT_TLS", Const, 0}, {"STV_DEFAULT", Const, 0}, {"STV_HIDDEN", Const, 0}, @@ -4544,11 +4566,14 @@ var PackageSymbols = map[string][]Symbol{ {"URLEncoding", Var, 0}, }, "encoding/binary": { + {"Append", Func, 23}, {"AppendByteOrder", Type, 19}, {"AppendUvarint", Func, 19}, {"AppendVarint", Func, 19}, {"BigEndian", Var, 0}, {"ByteOrder", Type, 0}, + {"Decode", Func, 23}, + {"Encode", Func, 23}, {"LittleEndian", Var, 0}, {"MaxVarintLen16", Const, 0}, {"MaxVarintLen32", Const, 0}, @@ -5308,6 +5333,7 @@ var PackageSymbols = map[string][]Symbol{ {"ParenExpr.Rparen", Field, 0}, {"ParenExpr.X", Field, 0}, {"Pkg", Const, 0}, + {"Preorder", Func, 23}, {"Print", Func, 0}, {"RECV", Const, 0}, {"RangeStmt", Type, 0}, @@ -5898,7 +5924,12 @@ var PackageSymbols = map[string][]Symbol{ }, "go/types": { {"(*Alias).Obj", Method, 22}, + {"(*Alias).Origin", Method, 23}, + {"(*Alias).Rhs", Method, 23}, + {"(*Alias).SetTypeParams", Method, 23}, {"(*Alias).String", Method, 22}, + {"(*Alias).TypeArgs", Method, 23}, + {"(*Alias).TypeParams", Method, 23}, {"(*Alias).Underlying", Method, 22}, {"(*ArgumentError).Error", Method, 18}, {"(*ArgumentError).Unwrap", Method, 18}, @@ -5943,6 +5974,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Func).Pkg", Method, 5}, {"(*Func).Pos", Method, 5}, {"(*Func).Scope", Method, 5}, + {"(*Func).Signature", Method, 23}, {"(*Func).String", Method, 5}, {"(*Func).Type", Method, 5}, {"(*Info).ObjectOf", Method, 5}, @@ -6992,6 +7024,12 @@ var PackageSymbols = map[string][]Symbol{ {"TempFile", Func, 0}, {"WriteFile", Func, 0}, }, + "iter": { + 
{"Pull", Func, 23}, + {"Pull2", Func, 23}, + {"Seq", Type, 23}, + {"Seq2", Type, 23}, + }, "log": { {"(*Logger).Fatal", Method, 0}, {"(*Logger).Fatalf", Method, 0}, @@ -7222,11 +7260,16 @@ var PackageSymbols = map[string][]Symbol{ {"Writer", Type, 0}, }, "maps": { + {"All", Func, 23}, {"Clone", Func, 21}, + {"Collect", Func, 23}, {"Copy", Func, 21}, {"DeleteFunc", Func, 21}, {"Equal", Func, 21}, {"EqualFunc", Func, 21}, + {"Insert", Func, 23}, + {"Keys", Func, 23}, + {"Values", Func, 23}, }, "math": { {"Abs", Func, 0}, @@ -7617,6 +7660,7 @@ var PackageSymbols = map[string][]Symbol{ }, "math/rand/v2": { {"(*ChaCha8).MarshalBinary", Method, 22}, + {"(*ChaCha8).Read", Method, 23}, {"(*ChaCha8).Seed", Method, 22}, {"(*ChaCha8).Uint64", Method, 22}, {"(*ChaCha8).UnmarshalBinary", Method, 22}, @@ -7636,6 +7680,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Rand).NormFloat64", Method, 22}, {"(*Rand).Perm", Method, 22}, {"(*Rand).Shuffle", Method, 22}, + {"(*Rand).Uint", Method, 23}, {"(*Rand).Uint32", Method, 22}, {"(*Rand).Uint32N", Method, 22}, {"(*Rand).Uint64", Method, 22}, @@ -7663,6 +7708,7 @@ var PackageSymbols = map[string][]Symbol{ {"Rand", Type, 22}, {"Shuffle", Func, 22}, {"Source", Type, 22}, + {"Uint", Func, 23}, {"Uint32", Func, 22}, {"Uint32N", Func, 22}, {"Uint64", Func, 22}, @@ -7743,6 +7789,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*DNSError).Error", Method, 0}, {"(*DNSError).Temporary", Method, 0}, {"(*DNSError).Timeout", Method, 0}, + {"(*DNSError).Unwrap", Method, 23}, {"(*Dialer).Dial", Method, 1}, {"(*Dialer).DialContext", Method, 7}, {"(*Dialer).MultipathTCP", Method, 21}, @@ -7809,6 +7856,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*TCPConn).RemoteAddr", Method, 0}, {"(*TCPConn).SetDeadline", Method, 0}, {"(*TCPConn).SetKeepAlive", Method, 0}, + {"(*TCPConn).SetKeepAliveConfig", Method, 23}, {"(*TCPConn).SetKeepAlivePeriod", Method, 2}, {"(*TCPConn).SetLinger", Method, 0}, {"(*TCPConn).SetNoDelay", Method, 0}, @@ -7922,6 +7970,7 @@ var PackageSymbols = map[string][]Symbol{ {"DNSError.IsTimeout", Field, 0}, {"DNSError.Name", Field, 0}, {"DNSError.Server", Field, 0}, + {"DNSError.UnwrapErr", Field, 23}, {"DefaultResolver", Var, 8}, {"Dial", Func, 0}, {"DialIP", Func, 0}, @@ -7937,6 +7986,7 @@ var PackageSymbols = map[string][]Symbol{ {"Dialer.DualStack", Field, 2}, {"Dialer.FallbackDelay", Field, 5}, {"Dialer.KeepAlive", Field, 3}, + {"Dialer.KeepAliveConfig", Field, 23}, {"Dialer.LocalAddr", Field, 1}, {"Dialer.Resolver", Field, 8}, {"Dialer.Timeout", Field, 1}, @@ -7989,10 +8039,16 @@ var PackageSymbols = map[string][]Symbol{ {"Interfaces", Func, 0}, {"InvalidAddrError", Type, 0}, {"JoinHostPort", Func, 0}, + {"KeepAliveConfig", Type, 23}, + {"KeepAliveConfig.Count", Field, 23}, + {"KeepAliveConfig.Enable", Field, 23}, + {"KeepAliveConfig.Idle", Field, 23}, + {"KeepAliveConfig.Interval", Field, 23}, {"Listen", Func, 0}, {"ListenConfig", Type, 11}, {"ListenConfig.Control", Field, 11}, {"ListenConfig.KeepAlive", Field, 13}, + {"ListenConfig.KeepAliveConfig", Field, 23}, {"ListenIP", Func, 0}, {"ListenMulticastUDP", Func, 0}, {"ListenPacket", Func, 0}, @@ -8081,6 +8137,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Request).Context", Method, 7}, {"(*Request).Cookie", Method, 0}, {"(*Request).Cookies", Method, 0}, + {"(*Request).CookiesNamed", Method, 23}, {"(*Request).FormFile", Method, 0}, {"(*Request).FormValue", Method, 0}, {"(*Request).MultipartReader", Method, 0}, @@ -8148,7 +8205,9 @@ var PackageSymbols = map[string][]Symbol{ {"Cookie.HttpOnly", 
Field, 0}, {"Cookie.MaxAge", Field, 0}, {"Cookie.Name", Field, 0}, + {"Cookie.Partitioned", Field, 23}, {"Cookie.Path", Field, 0}, + {"Cookie.Quoted", Field, 23}, {"Cookie.Raw", Field, 0}, {"Cookie.RawExpires", Field, 0}, {"Cookie.SameSite", Field, 11}, @@ -8225,7 +8284,9 @@ var PackageSymbols = map[string][]Symbol{ {"NoBody", Var, 8}, {"NotFound", Func, 0}, {"NotFoundHandler", Func, 0}, + {"ParseCookie", Func, 23}, {"ParseHTTPVersion", Func, 0}, + {"ParseSetCookie", Func, 23}, {"ParseTime", Func, 1}, {"Post", Func, 0}, {"PostForm", Func, 0}, @@ -8252,6 +8313,7 @@ var PackageSymbols = map[string][]Symbol{ {"Request.Host", Field, 0}, {"Request.Method", Field, 0}, {"Request.MultipartForm", Field, 0}, + {"Request.Pattern", Field, 23}, {"Request.PostForm", Field, 1}, {"Request.Proto", Field, 0}, {"Request.ProtoMajor", Field, 0}, @@ -8453,6 +8515,7 @@ var PackageSymbols = map[string][]Symbol{ {"DefaultRemoteAddr", Const, 0}, {"NewRecorder", Func, 0}, {"NewRequest", Func, 7}, + {"NewRequestWithContext", Func, 23}, {"NewServer", Func, 0}, {"NewTLSServer", Func, 0}, {"NewUnstartedServer", Func, 0}, @@ -8917,6 +8980,7 @@ var PackageSymbols = map[string][]Symbol{ {"Chown", Func, 0}, {"Chtimes", Func, 0}, {"Clearenv", Func, 0}, + {"CopyFS", Func, 23}, {"Create", Func, 0}, {"CreateTemp", Func, 16}, {"DevNull", Const, 0}, @@ -9150,6 +9214,7 @@ var PackageSymbols = map[string][]Symbol{ {"IsLocal", Func, 20}, {"Join", Func, 0}, {"ListSeparator", Const, 0}, + {"Localize", Func, 23}, {"Match", Func, 0}, {"Rel", Func, 0}, {"Separator", Const, 0}, @@ -9232,6 +9297,8 @@ var PackageSymbols = map[string][]Symbol{ {"(Value).Pointer", Method, 0}, {"(Value).Recv", Method, 0}, {"(Value).Send", Method, 0}, + {"(Value).Seq", Method, 23}, + {"(Value).Seq2", Method, 23}, {"(Value).Set", Method, 0}, {"(Value).SetBool", Method, 0}, {"(Value).SetBytes", Method, 0}, @@ -9314,6 +9381,7 @@ var PackageSymbols = map[string][]Symbol{ {"SelectSend", Const, 1}, {"SendDir", Const, 0}, {"Slice", Const, 0}, + {"SliceAt", Func, 23}, {"SliceHeader", Type, 0}, {"SliceHeader.Cap", Field, 0}, {"SliceHeader.Data", Field, 0}, @@ -9655,6 +9723,7 @@ var PackageSymbols = map[string][]Symbol{ {"BuildSetting", Type, 18}, {"BuildSetting.Key", Field, 18}, {"BuildSetting.Value", Field, 18}, + {"CrashOptions", Type, 23}, {"FreeOSMemory", Func, 1}, {"GCStats", Type, 1}, {"GCStats.LastGC", Field, 1}, @@ -9672,6 +9741,7 @@ var PackageSymbols = map[string][]Symbol{ {"PrintStack", Func, 0}, {"ReadBuildInfo", Func, 12}, {"ReadGCStats", Func, 1}, + {"SetCrashOutput", Func, 23}, {"SetGCPercent", Func, 1}, {"SetMaxStack", Func, 2}, {"SetMaxThreads", Func, 2}, @@ -9742,10 +9812,15 @@ var PackageSymbols = map[string][]Symbol{ {"WithRegion", Func, 11}, }, "slices": { + {"All", Func, 23}, + {"AppendSeq", Func, 23}, + {"Backward", Func, 23}, {"BinarySearch", Func, 21}, {"BinarySearchFunc", Func, 21}, + {"Chunk", Func, 23}, {"Clip", Func, 21}, {"Clone", Func, 21}, + {"Collect", Func, 23}, {"Compact", Func, 21}, {"CompactFunc", Func, 21}, {"Compare", Func, 21}, @@ -9767,11 +9842,16 @@ var PackageSymbols = map[string][]Symbol{ {"MaxFunc", Func, 21}, {"Min", Func, 21}, {"MinFunc", Func, 21}, + {"Repeat", Func, 23}, {"Replace", Func, 21}, {"Reverse", Func, 21}, {"Sort", Func, 21}, {"SortFunc", Func, 21}, {"SortStableFunc", Func, 21}, + {"Sorted", Func, 23}, + {"SortedFunc", Func, 23}, + {"SortedStableFunc", Func, 23}, + {"Values", Func, 23}, }, "sort": { {"(Float64Slice).Len", Method, 0}, @@ -9936,10 +10016,14 @@ var PackageSymbols = map[string][]Symbol{ 
{"TrimSpace", Func, 0}, {"TrimSuffix", Func, 1}, }, + "structs": { + {"HostLayout", Type, 23}, + }, "sync": { {"(*Cond).Broadcast", Method, 0}, {"(*Cond).Signal", Method, 0}, {"(*Cond).Wait", Method, 0}, + {"(*Map).Clear", Method, 23}, {"(*Map).CompareAndDelete", Method, 20}, {"(*Map).CompareAndSwap", Method, 20}, {"(*Map).Delete", Method, 9}, @@ -9986,13 +10070,17 @@ var PackageSymbols = map[string][]Symbol{ {"(*Bool).Store", Method, 19}, {"(*Bool).Swap", Method, 19}, {"(*Int32).Add", Method, 19}, + {"(*Int32).And", Method, 23}, {"(*Int32).CompareAndSwap", Method, 19}, {"(*Int32).Load", Method, 19}, + {"(*Int32).Or", Method, 23}, {"(*Int32).Store", Method, 19}, {"(*Int32).Swap", Method, 19}, {"(*Int64).Add", Method, 19}, + {"(*Int64).And", Method, 23}, {"(*Int64).CompareAndSwap", Method, 19}, {"(*Int64).Load", Method, 19}, + {"(*Int64).Or", Method, 23}, {"(*Int64).Store", Method, 19}, {"(*Int64).Swap", Method, 19}, {"(*Pointer).CompareAndSwap", Method, 19}, @@ -10000,18 +10088,24 @@ var PackageSymbols = map[string][]Symbol{ {"(*Pointer).Store", Method, 19}, {"(*Pointer).Swap", Method, 19}, {"(*Uint32).Add", Method, 19}, + {"(*Uint32).And", Method, 23}, {"(*Uint32).CompareAndSwap", Method, 19}, {"(*Uint32).Load", Method, 19}, + {"(*Uint32).Or", Method, 23}, {"(*Uint32).Store", Method, 19}, {"(*Uint32).Swap", Method, 19}, {"(*Uint64).Add", Method, 19}, + {"(*Uint64).And", Method, 23}, {"(*Uint64).CompareAndSwap", Method, 19}, {"(*Uint64).Load", Method, 19}, + {"(*Uint64).Or", Method, 23}, {"(*Uint64).Store", Method, 19}, {"(*Uint64).Swap", Method, 19}, {"(*Uintptr).Add", Method, 19}, + {"(*Uintptr).And", Method, 23}, {"(*Uintptr).CompareAndSwap", Method, 19}, {"(*Uintptr).Load", Method, 19}, + {"(*Uintptr).Or", Method, 23}, {"(*Uintptr).Store", Method, 19}, {"(*Uintptr).Swap", Method, 19}, {"(*Value).CompareAndSwap", Method, 17}, @@ -10023,6 +10117,11 @@ var PackageSymbols = map[string][]Symbol{ {"AddUint32", Func, 0}, {"AddUint64", Func, 0}, {"AddUintptr", Func, 0}, + {"AndInt32", Func, 23}, + {"AndInt64", Func, 23}, + {"AndUint32", Func, 23}, + {"AndUint64", Func, 23}, + {"AndUintptr", Func, 23}, {"Bool", Type, 19}, {"CompareAndSwapInt32", Func, 0}, {"CompareAndSwapInt64", Func, 0}, @@ -10038,6 +10137,11 @@ var PackageSymbols = map[string][]Symbol{ {"LoadUint32", Func, 0}, {"LoadUint64", Func, 0}, {"LoadUintptr", Func, 0}, + {"OrInt32", Func, 23}, + {"OrInt64", Func, 23}, + {"OrUint32", Func, 23}, + {"OrUint64", Func, 23}, + {"OrUintptr", Func, 23}, {"Pointer", Type, 19}, {"StoreInt32", Func, 0}, {"StoreInt64", Func, 0}, @@ -16200,6 +16304,7 @@ var PackageSymbols = map[string][]Symbol{ {"WSAEACCES", Const, 2}, {"WSAECONNABORTED", Const, 9}, {"WSAECONNRESET", Const, 3}, + {"WSAENOPROTOOPT", Const, 23}, {"WSAEnumProtocols", Func, 2}, {"WSAID_CONNECTEX", Var, 1}, {"WSAIoctl", Func, 0}, @@ -17284,6 +17389,7 @@ var PackageSymbols = map[string][]Symbol{ {"Encode", Func, 0}, {"EncodeRune", Func, 0}, {"IsSurrogate", Func, 0}, + {"RuneLen", Func, 23}, }, "unicode/utf8": { {"AppendRune", Func, 18}, @@ -17306,6 +17412,11 @@ var PackageSymbols = map[string][]Symbol{ {"ValidRune", Func, 1}, {"ValidString", Func, 0}, }, + "unique": { + {"(Handle).Value", Method, 23}, + {"Handle", Type, 23}, + {"Make", Func, 23}, + }, "unsafe": { {"Add", Func, 0}, {"Alignof", Func, 0}, diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go index e0c27ed251c..834e05381ce 100644 --- 
a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go @@ -1449,10 +1449,10 @@ const ( NotAGenericType // WrongTypeArgCount occurs when a type or function is instantiated with an - // incorrent number of type arguments, including when a generic type or + // incorrect number of type arguments, including when a generic type or // function is used without instantiation. // - // Errors inolving failed type inference are assigned other error codes. + // Errors involving failed type inference are assigned other error codes. // // Example: // type T[p any] int diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go index 7c77c2fbc03..83923286120 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/types.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -48,3 +48,18 @@ func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos, } return ErrorCode(data[0]), token.Pos(data[1]), token.Pos(data[2]), true } + +// NameRelativeTo returns a types.Qualifier that qualifies members of +// all packages other than pkg, using only the package name. +// (By contrast, [types.RelativeTo] uses the complete package path, +// which is often excessive.) +// +// If pkg is nil, it is equivalent to [*types.Package.Name]. +func NameRelativeTo(pkg *types.Package) types.Qualifier { + return func(other *types.Package) string { + if pkg != nil && pkg == other { + return "" // same package; unqualified + } + return other.Name() + } +} diff --git a/vendor/golang.org/x/tools/internal/versions/types_go122.go b/vendor/golang.org/x/tools/internal/versions/types_go122.go index e8180632a52..aac5db62c98 100644 --- a/vendor/golang.org/x/tools/internal/versions/types_go122.go +++ b/vendor/golang.org/x/tools/internal/versions/types_go122.go @@ -12,7 +12,7 @@ import ( "go/types" ) -// FileVersions returns a file's Go version. +// FileVersion returns a file's Go version. // The reported version is an unknown Future version if a // version cannot be determined. 
func FileVersion(info *types.Info, file *ast.File) string { diff --git a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go index 70b57dea6a4..f0c69484583 100644 --- a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go +++ b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go @@ -217,9 +217,9 @@ type GenerateAccessTokenRequest struct { NullFields []string `json:"-"` } -func (s *GenerateAccessTokenRequest) MarshalJSON() ([]byte, error) { +func (s GenerateAccessTokenRequest) MarshalJSON() ([]byte, error) { type NoMethod GenerateAccessTokenRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type GenerateAccessTokenResponse struct { @@ -243,9 +243,9 @@ type GenerateAccessTokenResponse struct { NullFields []string `json:"-"` } -func (s *GenerateAccessTokenResponse) MarshalJSON() ([]byte, error) { +func (s GenerateAccessTokenResponse) MarshalJSON() ([]byte, error) { type NoMethod GenerateAccessTokenResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type GenerateIdTokenRequest struct { @@ -277,9 +277,9 @@ type GenerateIdTokenRequest struct { NullFields []string `json:"-"` } -func (s *GenerateIdTokenRequest) MarshalJSON() ([]byte, error) { +func (s GenerateIdTokenRequest) MarshalJSON() ([]byte, error) { type NoMethod GenerateIdTokenRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type GenerateIdTokenResponse struct { @@ -301,9 +301,9 @@ type GenerateIdTokenResponse struct { NullFields []string `json:"-"` } -func (s *GenerateIdTokenResponse) MarshalJSON() ([]byte, error) { +func (s GenerateIdTokenResponse) MarshalJSON() ([]byte, error) { type NoMethod GenerateIdTokenResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SignBlobRequest struct { @@ -331,9 +331,9 @@ type SignBlobRequest struct { NullFields []string `json:"-"` } -func (s *SignBlobRequest) MarshalJSON() ([]byte, error) { +func (s SignBlobRequest) MarshalJSON() ([]byte, error) { type NoMethod SignBlobRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SignBlobResponse struct { @@ -368,9 +368,9 @@ type SignBlobResponse struct { NullFields []string `json:"-"` } -func (s *SignBlobResponse) MarshalJSON() ([]byte, error) { +func (s SignBlobResponse) MarshalJSON() ([]byte, error) { type NoMethod SignBlobResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SignJwtRequest struct { @@ -402,9 +402,9 @@ type SignJwtRequest struct { NullFields []string `json:"-"` } -func (s *SignJwtRequest) MarshalJSON() ([]byte, error) { +func (s SignJwtRequest) MarshalJSON() ([]byte, error) { type NoMethod SignJwtRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SignJwtResponse struct { @@ -441,9 +441,9 @@ type SignJwtResponse 
struct { NullFields []string `json:"-"` } -func (s *SignJwtResponse) MarshalJSON() ([]byte, error) { +func (s SignJwtResponse) MarshalJSON() ([]byte, error) { type NoMethod SignJwtResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ProjectsServiceAccountsGenerateAccessTokenCall struct { diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go index b6dbace4c97..5ea555ed01a 100644 --- a/vendor/google.golang.org/api/internal/creds.go +++ b/vendor/google.golang.org/api/internal/creds.go @@ -42,6 +42,26 @@ func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { return creds, nil } +// GetOAuth2Configuration determines configurations for the OAuth2 transport, which is separate from the API transport. +// The OAuth2 transport and endpoint will be configured for mTLS if applicable. +func GetOAuth2Configuration(ctx context.Context, settings *DialSettings) (string, *http.Client, error) { + clientCertSource, err := getClientCertificateSource(settings) + if err != nil { + return "", nil, err + } + tokenURL := oAuth2Endpoint(clientCertSource) + var oauth2Client *http.Client + if clientCertSource != nil { + tlsConfig := &tls.Config{ + GetClientCertificate: clientCertSource, + } + oauth2Client = customHTTPClient(tlsConfig) + } else { + oauth2Client = oauth2.NewClient(ctx, nil) + } + return tokenURL, oauth2Client, nil +} + func credsNewAuth(ctx context.Context, settings *DialSettings) (*google.Credentials, error) { // Preserve old options behavior if settings.InternalCredentials != nil { @@ -80,13 +100,18 @@ func credsNewAuth(ctx context.Context, settings *DialSettings) (*google.Credenti aud = settings.DefaultAudience } + tokenURL, oauth2Client, err := GetOAuth2Configuration(ctx, settings) + if err != nil { + return nil, err + } creds, err := credentials.DetectDefault(&credentials.DetectOptions{ Scopes: scopes, Audience: aud, CredentialsFile: settings.CredentialsFile, CredentialsJSON: settings.CredentialsJSON, UseSelfSignedJWT: useSelfSignedJWT, - Client: oauth2.NewClient(ctx, nil), + TokenURL: tokenURL, + Client: oauth2Client, }) if err != nil { return nil, err @@ -102,7 +127,7 @@ func baseCreds(ctx context.Context, ds *DialSettings) (*google.Credentials, erro if ds.Credentials != nil { return ds.Credentials, nil } - if ds.CredentialsJSON != nil { + if len(ds.CredentialsJSON) > 0 { return credentialsFromJSON(ctx, ds.CredentialsJSON, ds) } if ds.CredentialsFile != "" { @@ -147,19 +172,12 @@ func credentialsFromJSON(ctx context.Context, data []byte, ds *DialSettings) (*g var params google.CredentialsParams params.Scopes = ds.GetScopes() - // Determine configurations for the OAuth2 transport, which is separate from the API transport. - // The OAuth2 transport and endpoint will be configured for mTLS if applicable. 
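GetOAuth2Configuration above decides, per dial settings, whether the token exchange should go over mTLS. A rough sketch of the idea under stated assumptions: the two token URLs are illustrative placeholders, certSource stands in for getClientCertificateSource, and the plain path falls back to oauth2.NewClient as the vendored code does.

package main

import (
	"context"
	"crypto/tls"
	"fmt"
	"net/http"
	"time"

	"golang.org/x/oauth2"
)

// Hypothetical endpoints standing in for the library's internal defaults.
const (
	defaultTokenURL = "https://oauth2.googleapis.com/token"
	mtlsTokenURL    = "https://oauth2.mtls.googleapis.com/token"
)

// oauth2Config mirrors the shape of GetOAuth2Configuration: when a client
// certificate source is available, both the token endpoint and the HTTP
// client used for the token exchange are switched to mTLS; otherwise a
// plain oauth2 client is returned.
func oauth2Config(ctx context.Context, certSource func(*tls.CertificateRequestInfo) (*tls.Certificate, error)) (string, *http.Client) {
	if certSource == nil {
		return defaultTokenURL, oauth2.NewClient(ctx, nil)
	}
	transport := &http.Transport{
		TLSClientConfig: &tls.Config{GetClientCertificate: certSource},
	}
	return mtlsTokenURL, &http.Client{Transport: transport, Timeout: 30 * time.Second}
}

func main() {
	url, client := oauth2Config(context.Background(), nil)
	fmt.Println(url, client != nil)
}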
- clientCertSource, err := getClientCertificateSource(ds) + tokenURL, oauth2Client, err := GetOAuth2Configuration(ctx, ds) if err != nil { return nil, err } - params.TokenURL = oAuth2Endpoint(clientCertSource) - if clientCertSource != nil { - tlsConfig := &tls.Config{ - GetClientCertificate: clientCertSource, - } - ctx = context.WithValue(ctx, oauth2.HTTPClient, customHTTPClient(tlsConfig)) - } + params.TokenURL = tokenURL + ctx = context.WithValue(ctx, oauth2.HTTPClient, oauth2Client) // By default, a standard OAuth 2.0 token source is created cred, err := google.CredentialsFromJSONWithParams(ctx, data, params) diff --git a/vendor/google.golang.org/api/internal/gensupport/resumable.go b/vendor/google.golang.org/api/internal/gensupport/resumable.go index 08e7aacefb6..f828ddb60e6 100644 --- a/vendor/google.golang.org/api/internal/gensupport/resumable.go +++ b/vendor/google.golang.org/api/internal/gensupport/resumable.go @@ -171,6 +171,10 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err if resp != nil && resp.Body != nil { resp.Body.Close() } + // If there were retries, indicate this in the error message and wrap the final error. + if rx.attempts > 1 { + return nil, fmt.Errorf("chunk upload failed after %d attempts;, final error: %w", rx.attempts, err) + } return nil, err } // This case is very unlikely but possible only if rx.ChunkRetryDeadline is diff --git a/vendor/google.golang.org/api/internal/gensupport/retry.go b/vendor/google.golang.org/api/internal/gensupport/retry.go index 20b57d925f1..089ee3189ba 100644 --- a/vendor/google.golang.org/api/internal/gensupport/retry.go +++ b/vendor/google.golang.org/api/internal/gensupport/retry.go @@ -8,6 +8,7 @@ import ( "errors" "io" "net" + "net/url" "strings" "time" @@ -29,8 +30,6 @@ var ( backoff = func() Backoff { return &gax.Backoff{Initial: 100 * time.Millisecond} } - // syscallRetryable is a platform-specific hook, specified in retryable_linux.go - syscallRetryable func(error) bool = func(err error) bool { return false } ) const ( @@ -56,30 +55,33 @@ func shouldRetry(status int, err error) bool { if status == statusTooManyRequests || status == statusRequestTimeout { return true } - if err == io.ErrUnexpectedEOF { + if errors.Is(err, io.ErrUnexpectedEOF) { return true } - // Transient network errors should be retried. - if syscallRetryable(err) { + if errors.Is(err, net.ErrClosed) { return true } - if err, ok := err.(interface{ Temporary() bool }); ok { - if err.Temporary() { - return true + switch e := err.(type) { + case *net.OpError, *url.Error: + // Retry socket-level errors ECONNREFUSED and ECONNRESET (from syscall). + // Unfortunately the error type is unexported, so we resort to string + // matching. + retriable := []string{"connection refused", "connection reset", "broken pipe"} + for _, s := range retriable { + if strings.Contains(e.Error(), s) { + return true + } } - } - var opErr *net.OpError - if errors.As(err, &opErr) { - if strings.Contains(opErr.Error(), "use of closed network connection") { - // TODO: check against net.ErrClosed (go 1.16+) instead of string + case interface{ Temporary() bool }: + if e.Temporary() { return true } } - // If Go 1.13 error unwrapping is available, use this to examine wrapped + // If error unwrapping is available, use this to examine wrapped // errors. 
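The resumable-upload change above wraps the final error with the attempt count once retries have happened. A self-contained sketch of that wrapping (uploadChunk and the attempt limit are placeholders), showing that errors.Is still reaches the underlying cause through %w:

package main

import (
	"errors"
	"fmt"
)

var errTransient = errors.New("transient failure")

// uploadChunk stands in for a single chunk upload attempt; in this sketch it
// always fails so the retry path is exercised.
func uploadChunk() error { return errTransient }

// uploadWithRetries mirrors the idea above: when more than one attempt was
// made, the final error is wrapped with the attempt count so callers can see
// that retries happened while errors.Is/errors.As still reach the cause.
func uploadWithRetries(maxAttempts int) error {
	var err error
	attempts := 0
	for attempts < maxAttempts {
		attempts++
		if err = uploadChunk(); err == nil {
			return nil
		}
	}
	if attempts > 1 {
		return fmt.Errorf("chunk upload failed after %d attempts; final error: %w", attempts, err)
	}
	return err
}

func main() {
	err := uploadWithRetries(3)
	fmt.Println(err)                          // chunk upload failed after 3 attempts; final error: transient failure
	fmt.Println(errors.Is(err, errTransient)) // true
}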
- if err, ok := err.(interface{ Unwrap() error }); ok { - return shouldRetry(status, err.Unwrap()) + if e, ok := err.(interface{ Unwrap() error }); ok { + return shouldRetry(status, e.Unwrap()) } return false } diff --git a/vendor/google.golang.org/api/internal/gensupport/retryable_linux.go b/vendor/google.golang.org/api/internal/gensupport/retryable_linux.go deleted file mode 100644 index a916c3da29b..00000000000 --- a/vendor/google.golang.org/api/internal/gensupport/retryable_linux.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2020 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux -// +build linux - -package gensupport - -import "syscall" - -func init() { - // Initialize syscallRetryable to return true on transient socket-level - // errors. These errors are specific to Linux. - syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED } -} diff --git a/vendor/google.golang.org/api/internal/gensupport/send.go b/vendor/google.golang.org/api/internal/gensupport/send.go index f39dd00d99f..f6716134ebf 100644 --- a/vendor/google.golang.org/api/internal/gensupport/send.go +++ b/vendor/google.golang.org/api/internal/gensupport/send.go @@ -48,8 +48,24 @@ func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (* if ctx != nil { headers := callctx.HeadersFromContext(ctx) for k, vals := range headers { - for _, v := range vals { - req.Header.Add(k, v) + if k == "x-goog-api-client" { + // Merge all values into a single "x-goog-api-client" header. + var mergedVal strings.Builder + baseXGoogHeader := req.Header.Get("X-Goog-Api-Client") + if baseXGoogHeader != "" { + mergedVal.WriteString(baseXGoogHeader) + mergedVal.WriteRune(' ') + } + for _, v := range vals { + mergedVal.WriteString(v) + mergedVal.WriteRune(' ') + } + // Remove the last space and replace the header on the request. + req.Header.Set(k, mergedVal.String()[:mergedVal.Len()-1]) + } else { + for _, v := range vals { + req.Header.Add(k, v) + } } } } @@ -118,7 +134,9 @@ func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, r var err error attempts := 1 invocationID := uuid.New().String() - baseXGoogHeader := req.Header.Get("X-Goog-Api-Client") + + xGoogHeaderVals := req.Header.Values("X-Goog-Api-Client") + baseXGoogHeader := strings.Join(xGoogHeaderVals, " ") // Loop to retry the request, up to the context deadline. var pause time.Duration diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go index 6d0c18e5a87..edba49af499 100644 --- a/vendor/google.golang.org/api/internal/settings.go +++ b/vendor/google.golang.org/api/internal/settings.go @@ -126,7 +126,7 @@ func (ds *DialSettings) Validate() error { if ds.Credentials != nil { nCreds++ } - if ds.CredentialsJSON != nil { + if len(ds.CredentialsJSON) > 0 { nCreds++ } if ds.CredentialsFile != "" { diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index 28c324a6476..54ae69fa817 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. 
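With retryable_linux.go removed, the retry classification above becomes platform-independent: errors.Is for sentinel errors, string matching for the unexported socket errors, Temporary(), and a recursive unwrap. A simplified stand-alone version of that check (isRetryable is a hypothetical name, not the vendored shouldRetry):

package main

import (
	"errors"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
	"strings"
)

// isRetryable classifies an attempt: HTTP 429/408 retry, io.ErrUnexpectedEOF
// and net.ErrClosed retry, socket-level refused/reset/broken-pipe errors are
// matched by string because the underlying errno types are not exported
// portably, Temporary() errors retry, and wrapped errors are re-checked.
func isRetryable(status int, err error) bool {
	if status == http.StatusTooManyRequests || status == http.StatusRequestTimeout {
		return true
	}
	if errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, net.ErrClosed) {
		return true
	}
	switch e := err.(type) {
	case *net.OpError, *url.Error:
		for _, s := range []string{"connection refused", "connection reset", "broken pipe"} {
			if strings.Contains(e.Error(), s) {
				return true
			}
		}
	case interface{ Temporary() bool }:
		if e.Temporary() {
			return true
		}
	}
	if u, ok := err.(interface{ Unwrap() error }); ok {
		return isRetryable(status, u.Unwrap())
	}
	return false
}

func main() {
	err := &net.OpError{Op: "dial", Err: errors.New("connection refused")}
	fmt.Println(isRetryable(0, err))                  // true
	fmt.Println(isRetryable(500, errors.New("boom"))) // false
}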
-const Version = "0.181.0" +const Version = "0.190.0" diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json index 1b4be6993dd..78e9eb1c7e2 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -27,13 +27,23 @@ "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/storage/docs/json_api/", "endpoints": [ + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.europe-west3.rep.googleapis.com/", + "location": "europe-west3" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.europe-west9.rep.googleapis.com/", + "location": "europe-west9" + }, { "description": "Regional Endpoint", "endpointUrl": "https://storage.me-central2.rep.googleapis.com/", "location": "me-central2" } ], - "etag": "\"33303333323233383838323039393532373539\"", + "etag": "\"323732353932323032353837333633313231\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -681,6 +691,38 @@ "https://www.googleapis.com/auth/devstorage.full_control" ] }, + "getStorageLayout": { + "description": "Returns the storage layout configuration for the specified bucket. Note that this operation requires storage.objects.list permission.", + "httpMethod": "GET", + "id": "storage.buckets.getStorageLayout", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "prefix": { + "description": "An optional prefix used for permission check. It is useful when the caller only has storage.objects.list permission under a specific prefix.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/storageLayout", + "response": { + "$ref": "BucketStorageLayout" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, "insert": { "description": "Creates a new bucket.", "httpMethod": "POST", @@ -4043,7 +4085,7 @@ } } }, - "revision": "20240319", + "revision": "20240706", "rootUrl": "https://storage.googleapis.com/", "schemas": { "AnywhereCache": { @@ -4305,6 +4347,49 @@ "description": "The ID of the bucket. For buckets, the id and name properties are the same.", "type": "string" }, + "ipFilter": { + "description": "The bucket's IP filter configuration. Specifies the network sources that are allowed to access the operations on the bucket, as well as its underlying objects. Only enforced when the mode is set to 'Enabled'.", + "properties": { + "mode": { + "description": "The mode of the IP filter. 
Valid values are 'Enabled' and 'Disabled'.", + "type": "string" + }, + "publicNetworkSource": { + "description": "The public network source of the bucket's IP filter.", + "properties": { + "allowedIpCidrRanges": { + "description": "The list of public IPv4, IPv6 cidr ranges that are allowed to access the bucket.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "vpcNetworkSources": { + "description": "The list of [VPC network](https://cloud.google.com/vpc/docs/vpc) sources of the bucket's IP filter.", + "items": { + "properties": { + "allowedIpCidrRanges": { + "description": "The list of IPv4, IPv6 cidr ranges subnetworks that are allowed to access the bucket.", + "items": { + "type": "string" + }, + "type": "array" + }, + "network": { + "description": "Name of the network. Format: projects/{PROJECT_ID}/global/networks/{NETWORK_NAME}", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, "kind": { "default": "storage#bucket", "description": "The kind of item this is. For buckets, this is always storage#bucket.", @@ -4662,6 +4747,53 @@ }, "type": "object" }, + "BucketStorageLayout": { + "description": "The storage layout configuration of a bucket.", + "id": "BucketStorageLayout", + "properties": { + "bucket": { + "description": "The name of the bucket.", + "type": "string" + }, + "customPlacementConfig": { + "description": "The bucket's custom placement configuration for Custom Dual Regions.", + "properties": { + "dataLocations": { + "description": "The list of regional locations in which data is placed.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "hierarchicalNamespace": { + "description": "The bucket's hierarchical namespace configuration.", + "properties": { + "enabled": { + "description": "When set to true, hierarchical namespace is enabled for this bucket.", + "type": "boolean" + } + }, + "type": "object" + }, + "kind": { + "default": "storage#storageLayout", + "description": "The kind of item this is. For storage layout, this is always storage#storageLayout.", + "type": "string" + }, + "location": { + "description": "The location of the bucket.", + "type": "string" + }, + "locationType": { + "description": "The type of the bucket location.", + "type": "string" + } + }, + "type": "object" + }, "Buckets": { "description": "A list of buckets.", "id": "Buckets", @@ -4928,6 +5060,11 @@ "description": "The response message for storage.buckets.operations.list.", "id": "GoogleLongrunningListOperationsResponse", "properties": { + "kind": { + "default": "storage#operations", + "description": "The kind of item this is. For lists of operations, this is always storage#operations.", + "type": "string" + }, "nextPageToken": { "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.", "type": "string" @@ -4954,6 +5091,11 @@ "$ref": "GoogleRpcStatus", "description": "The error result of the operation in case of failure or cancellation." }, + "kind": { + "default": "storage#operation", + "description": "The kind of item this is. For operations, this is always storage#operation.", + "type": "string" + }, "metadata": { "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", @@ -4973,6 +5115,10 @@ }, "description": "The normal response of the operation in case of success. 
If the original method returns no data on success, such as \"Delete\", the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type \"XxxResponse\", where \"Xxx\" is the original method name. For example, if the original method name is \"TakeSnapshot()\", the inferred response type is \"TakeSnapshotResponse\".", "type": "object" + }, + "selfLink": { + "description": "The link to this long running operation.", + "type": "string" } }, "type": "object" diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index 6a5db35fadb..07de6ebf63c 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -388,9 +388,9 @@ type AnywhereCache struct { NullFields []string `json:"-"` } -func (s *AnywhereCache) MarshalJSON() ([]byte, error) { +func (s AnywhereCache) MarshalJSON() ([]byte, error) { type NoMethod AnywhereCache - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AnywhereCaches: A list of Anywhere Caches. @@ -420,9 +420,9 @@ type AnywhereCaches struct { NullFields []string `json:"-"` } -func (s *AnywhereCaches) MarshalJSON() ([]byte, error) { +func (s AnywhereCaches) MarshalJSON() ([]byte, error) { type NoMethod AnywhereCaches - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Bucket: A bucket. @@ -466,6 +466,10 @@ type Bucket struct { // Id: The ID of the bucket. For buckets, the id and name properties are the // same. Id string `json:"id,omitempty"` + // IpFilter: The bucket's IP filter configuration. Specifies the network + // sources that are allowed to access the operations on the bucket, as well as + // its underlying objects. Only enforced when the mode is set to 'Enabled'. + IpFilter *BucketIpFilter `json:"ipFilter,omitempty"` // Kind: The kind of item this is. For buckets, this is always storage#bucket. Kind string `json:"kind,omitempty"` // Labels: User-provided labels, in key/value pairs. @@ -548,9 +552,9 @@ type Bucket struct { NullFields []string `json:"-"` } -func (s *Bucket) MarshalJSON() ([]byte, error) { +func (s Bucket) MarshalJSON() ([]byte, error) { type NoMethod Bucket - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketAutoclass: The bucket's Autoclass configuration. @@ -580,9 +584,9 @@ type BucketAutoclass struct { NullFields []string `json:"-"` } -func (s *BucketAutoclass) MarshalJSON() ([]byte, error) { +func (s BucketAutoclass) MarshalJSON() ([]byte, error) { type NoMethod BucketAutoclass - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketBilling: The bucket's billing configuration. 
@@ -602,9 +606,9 @@ type BucketBilling struct { NullFields []string `json:"-"` } -func (s *BucketBilling) MarshalJSON() ([]byte, error) { +func (s BucketBilling) MarshalJSON() ([]byte, error) { type NoMethod BucketBilling - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type BucketCors struct { @@ -634,9 +638,9 @@ type BucketCors struct { NullFields []string `json:"-"` } -func (s *BucketCors) MarshalJSON() ([]byte, error) { +func (s BucketCors) MarshalJSON() ([]byte, error) { type NoMethod BucketCors - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketCustomPlacementConfig: The bucket's custom placement configuration for @@ -657,9 +661,9 @@ type BucketCustomPlacementConfig struct { NullFields []string `json:"-"` } -func (s *BucketCustomPlacementConfig) MarshalJSON() ([]byte, error) { +func (s BucketCustomPlacementConfig) MarshalJSON() ([]byte, error) { type NoMethod BucketCustomPlacementConfig - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketEncryption: Encryption configuration for a bucket. @@ -680,9 +684,9 @@ type BucketEncryption struct { NullFields []string `json:"-"` } -func (s *BucketEncryption) MarshalJSON() ([]byte, error) { +func (s BucketEncryption) MarshalJSON() ([]byte, error) { type NoMethod BucketEncryption - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketHierarchicalNamespace: The bucket's hierarchical namespace @@ -704,9 +708,9 @@ type BucketHierarchicalNamespace struct { NullFields []string `json:"-"` } -func (s *BucketHierarchicalNamespace) MarshalJSON() ([]byte, error) { +func (s BucketHierarchicalNamespace) MarshalJSON() ([]byte, error) { type NoMethod BucketHierarchicalNamespace - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketIamConfiguration: The bucket's IAM configuration. 
@@ -736,9 +740,9 @@ type BucketIamConfiguration struct { NullFields []string `json:"-"` } -func (s *BucketIamConfiguration) MarshalJSON() ([]byte, error) { +func (s BucketIamConfiguration) MarshalJSON() ([]byte, error) { type NoMethod BucketIamConfiguration - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketIamConfigurationBucketPolicyOnly: The bucket's uniform bucket-level @@ -768,9 +772,9 @@ type BucketIamConfigurationBucketPolicyOnly struct { NullFields []string `json:"-"` } -func (s *BucketIamConfigurationBucketPolicyOnly) MarshalJSON() ([]byte, error) { +func (s BucketIamConfigurationBucketPolicyOnly) MarshalJSON() ([]byte, error) { type NoMethod BucketIamConfigurationBucketPolicyOnly - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketIamConfigurationUniformBucketLevelAccess: The bucket's uniform @@ -798,9 +802,87 @@ type BucketIamConfigurationUniformBucketLevelAccess struct { NullFields []string `json:"-"` } -func (s *BucketIamConfigurationUniformBucketLevelAccess) MarshalJSON() ([]byte, error) { +func (s BucketIamConfigurationUniformBucketLevelAccess) MarshalJSON() ([]byte, error) { type NoMethod BucketIamConfigurationUniformBucketLevelAccess - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketIpFilter: The bucket's IP filter configuration. Specifies the network +// sources that are allowed to access the operations on the bucket, as well as +// its underlying objects. Only enforced when the mode is set to 'Enabled'. +type BucketIpFilter struct { + // Mode: The mode of the IP filter. Valid values are 'Enabled' and 'Disabled'. + Mode string `json:"mode,omitempty"` + // PublicNetworkSource: The public network source of the bucket's IP filter. + PublicNetworkSource *BucketIpFilterPublicNetworkSource `json:"publicNetworkSource,omitempty"` + // VpcNetworkSources: The list of VPC network + // (https://cloud.google.com/vpc/docs/vpc) sources of the bucket's IP filter. + VpcNetworkSources []*BucketIpFilterVpcNetworkSources `json:"vpcNetworkSources,omitempty"` + // ForceSendFields is a list of field names (e.g. "Mode") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Mode") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketIpFilter) MarshalJSON() ([]byte, error) { + type NoMethod BucketIpFilter + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketIpFilterPublicNetworkSource: The public network source of the bucket's +// IP filter. +type BucketIpFilterPublicNetworkSource struct { + // AllowedIpCidrRanges: The list of public IPv4, IPv6 cidr ranges that are + // allowed to access the bucket. + AllowedIpCidrRanges []string `json:"allowedIpCidrRanges,omitempty"` + // ForceSendFields is a list of field names (e.g. 
"AllowedIpCidrRanges") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "AllowedIpCidrRanges") to include + // in API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketIpFilterPublicNetworkSource) MarshalJSON() ([]byte, error) { + type NoMethod BucketIpFilterPublicNetworkSource + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +type BucketIpFilterVpcNetworkSources struct { + // AllowedIpCidrRanges: The list of IPv4, IPv6 cidr ranges subnetworks that are + // allowed to access the bucket. + AllowedIpCidrRanges []string `json:"allowedIpCidrRanges,omitempty"` + // Network: Name of the network. Format: + // projects/{PROJECT_ID}/global/networks/{NETWORK_NAME} + Network string `json:"network,omitempty"` + // ForceSendFields is a list of field names (e.g. "AllowedIpCidrRanges") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "AllowedIpCidrRanges") to include + // in API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketIpFilterVpcNetworkSources) MarshalJSON() ([]byte, error) { + type NoMethod BucketIpFilterVpcNetworkSources + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketLifecycle: The bucket's lifecycle configuration. See lifecycle @@ -822,9 +904,9 @@ type BucketLifecycle struct { NullFields []string `json:"-"` } -func (s *BucketLifecycle) MarshalJSON() ([]byte, error) { +func (s BucketLifecycle) MarshalJSON() ([]byte, error) { type NoMethod BucketLifecycle - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type BucketLifecycleRule struct { @@ -845,9 +927,9 @@ type BucketLifecycleRule struct { NullFields []string `json:"-"` } -func (s *BucketLifecycleRule) MarshalJSON() ([]byte, error) { +func (s BucketLifecycleRule) MarshalJSON() ([]byte, error) { type NoMethod BucketLifecycleRule - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketLifecycleRuleAction: The action to take. 
@@ -871,9 +953,9 @@ type BucketLifecycleRuleAction struct { NullFields []string `json:"-"` } -func (s *BucketLifecycleRuleAction) MarshalJSON() ([]byte, error) { +func (s BucketLifecycleRuleAction) MarshalJSON() ([]byte, error) { type NoMethod BucketLifecycleRuleAction - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketLifecycleRuleCondition: The condition(s) under which the action will @@ -947,9 +1029,9 @@ type BucketLifecycleRuleCondition struct { NullFields []string `json:"-"` } -func (s *BucketLifecycleRuleCondition) MarshalJSON() ([]byte, error) { +func (s BucketLifecycleRuleCondition) MarshalJSON() ([]byte, error) { type NoMethod BucketLifecycleRuleCondition - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketLogging: The bucket's logging configuration, which defines the @@ -973,9 +1055,9 @@ type BucketLogging struct { NullFields []string `json:"-"` } -func (s *BucketLogging) MarshalJSON() ([]byte, error) { +func (s BucketLogging) MarshalJSON() ([]byte, error) { type NoMethod BucketLogging - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketObjectRetention: The bucket's object retention config. @@ -995,9 +1077,9 @@ type BucketObjectRetention struct { NullFields []string `json:"-"` } -func (s *BucketObjectRetention) MarshalJSON() ([]byte, error) { +func (s BucketObjectRetention) MarshalJSON() ([]byte, error) { type NoMethod BucketObjectRetention - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketOwner: The owner of the bucket. This is always the project team's @@ -1020,9 +1102,9 @@ type BucketOwner struct { NullFields []string `json:"-"` } -func (s *BucketOwner) MarshalJSON() ([]byte, error) { +func (s BucketOwner) MarshalJSON() ([]byte, error) { type NoMethod BucketOwner - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketRetentionPolicy: The bucket's retention policy. The retention policy @@ -1058,9 +1140,9 @@ type BucketRetentionPolicy struct { NullFields []string `json:"-"` } -func (s *BucketRetentionPolicy) MarshalJSON() ([]byte, error) { +func (s BucketRetentionPolicy) MarshalJSON() ([]byte, error) { type NoMethod BucketRetentionPolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketSoftDeletePolicy: The bucket's soft delete policy, which defines the @@ -1087,9 +1169,9 @@ type BucketSoftDeletePolicy struct { NullFields []string `json:"-"` } -func (s *BucketSoftDeletePolicy) MarshalJSON() ([]byte, error) { +func (s BucketSoftDeletePolicy) MarshalJSON() ([]byte, error) { type NoMethod BucketSoftDeletePolicy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketVersioning: The bucket's versioning configuration. 
@@ -1109,9 +1191,9 @@ type BucketVersioning struct { NullFields []string `json:"-"` } -func (s *BucketVersioning) MarshalJSON() ([]byte, error) { +func (s BucketVersioning) MarshalJSON() ([]byte, error) { type NoMethod BucketVersioning - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketWebsite: The bucket's website configuration, controlling how the @@ -1140,9 +1222,9 @@ type BucketWebsite struct { NullFields []string `json:"-"` } -func (s *BucketWebsite) MarshalJSON() ([]byte, error) { +func (s BucketWebsite) MarshalJSON() ([]byte, error) { type NoMethod BucketWebsite - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketAccessControl: An access-control entry. @@ -1199,9 +1281,9 @@ type BucketAccessControl struct { NullFields []string `json:"-"` } -func (s *BucketAccessControl) MarshalJSON() ([]byte, error) { +func (s BucketAccessControl) MarshalJSON() ([]byte, error) { type NoMethod BucketAccessControl - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketAccessControlProjectTeam: The project team associated with the entity, @@ -1224,9 +1306,9 @@ type BucketAccessControlProjectTeam struct { NullFields []string `json:"-"` } -func (s *BucketAccessControlProjectTeam) MarshalJSON() ([]byte, error) { +func (s BucketAccessControlProjectTeam) MarshalJSON() ([]byte, error) { type NoMethod BucketAccessControlProjectTeam - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketAccessControls: An access-control list. @@ -1252,9 +1334,93 @@ type BucketAccessControls struct { NullFields []string `json:"-"` } -func (s *BucketAccessControls) MarshalJSON() ([]byte, error) { +func (s BucketAccessControls) MarshalJSON() ([]byte, error) { type NoMethod BucketAccessControls - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketStorageLayout: The storage layout configuration of a bucket. +type BucketStorageLayout struct { + // Bucket: The name of the bucket. + Bucket string `json:"bucket,omitempty"` + // CustomPlacementConfig: The bucket's custom placement configuration for + // Custom Dual Regions. + CustomPlacementConfig *BucketStorageLayoutCustomPlacementConfig `json:"customPlacementConfig,omitempty"` + // HierarchicalNamespace: The bucket's hierarchical namespace configuration. + HierarchicalNamespace *BucketStorageLayoutHierarchicalNamespace `json:"hierarchicalNamespace,omitempty"` + // Kind: The kind of item this is. For storage layout, this is always + // storage#storageLayout. + Kind string `json:"kind,omitempty"` + // Location: The location of the bucket. + Location string `json:"location,omitempty"` + // LocationType: The type of the bucket location. + LocationType string `json:"locationType,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Bucket") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. 
See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Bucket") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketStorageLayout) MarshalJSON() ([]byte, error) { + type NoMethod BucketStorageLayout + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketStorageLayoutCustomPlacementConfig: The bucket's custom placement +// configuration for Custom Dual Regions. +type BucketStorageLayoutCustomPlacementConfig struct { + // DataLocations: The list of regional locations in which data is placed. + DataLocations []string `json:"dataLocations,omitempty"` + // ForceSendFields is a list of field names (e.g. "DataLocations") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "DataLocations") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketStorageLayoutCustomPlacementConfig) MarshalJSON() ([]byte, error) { + type NoMethod BucketStorageLayoutCustomPlacementConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketStorageLayoutHierarchicalNamespace: The bucket's hierarchical +// namespace configuration. +type BucketStorageLayoutHierarchicalNamespace struct { + // Enabled: When set to true, hierarchical namespace is enabled for this + // bucket. + Enabled bool `json:"enabled,omitempty"` + // ForceSendFields is a list of field names (e.g. "Enabled") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Enabled") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketStorageLayoutHierarchicalNamespace) MarshalJSON() ([]byte, error) { + type NoMethod BucketStorageLayoutHierarchicalNamespace + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Buckets: A list of buckets. @@ -1284,9 +1450,9 @@ type Buckets struct { NullFields []string `json:"-"` } -func (s *Buckets) MarshalJSON() ([]byte, error) { +func (s Buckets) MarshalJSON() ([]byte, error) { type NoMethod Buckets - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BulkRestoreObjectsRequest: A bulk restore objects request. 
@@ -1325,9 +1491,9 @@ type BulkRestoreObjectsRequest struct { NullFields []string `json:"-"` } -func (s *BulkRestoreObjectsRequest) MarshalJSON() ([]byte, error) { +func (s BulkRestoreObjectsRequest) MarshalJSON() ([]byte, error) { type NoMethod BulkRestoreObjectsRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Channel: An notification channel used to watch for resource changes. @@ -1373,9 +1539,9 @@ type Channel struct { NullFields []string `json:"-"` } -func (s *Channel) MarshalJSON() ([]byte, error) { +func (s Channel) MarshalJSON() ([]byte, error) { type NoMethod Channel - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ComposeRequest: A Compose request. @@ -1400,9 +1566,9 @@ type ComposeRequest struct { NullFields []string `json:"-"` } -func (s *ComposeRequest) MarshalJSON() ([]byte, error) { +func (s ComposeRequest) MarshalJSON() ([]byte, error) { type NoMethod ComposeRequest - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ComposeRequestSourceObjects struct { @@ -1427,9 +1593,9 @@ type ComposeRequestSourceObjects struct { NullFields []string `json:"-"` } -func (s *ComposeRequestSourceObjects) MarshalJSON() ([]byte, error) { +func (s ComposeRequestSourceObjects) MarshalJSON() ([]byte, error) { type NoMethod ComposeRequestSourceObjects - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ComposeRequestSourceObjectsObjectPreconditions: Conditions that must be met @@ -1453,9 +1619,9 @@ type ComposeRequestSourceObjectsObjectPreconditions struct { NullFields []string `json:"-"` } -func (s *ComposeRequestSourceObjectsObjectPreconditions) MarshalJSON() ([]byte, error) { +func (s ComposeRequestSourceObjectsObjectPreconditions) MarshalJSON() ([]byte, error) { type NoMethod ComposeRequestSourceObjectsObjectPreconditions - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Expr: Represents an expression text. Example: title: "User account presence" @@ -1489,9 +1655,9 @@ type Expr struct { NullFields []string `json:"-"` } -func (s *Expr) MarshalJSON() ([]byte, error) { +func (s Expr) MarshalJSON() ([]byte, error) { type NoMethod Expr - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Folder: A folder. 
Only available in buckets with hierarchical namespace @@ -1534,9 +1700,9 @@ type Folder struct { NullFields []string `json:"-"` } -func (s *Folder) MarshalJSON() ([]byte, error) { +func (s Folder) MarshalJSON() ([]byte, error) { type NoMethod Folder - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // FolderPendingRenameInfo: Only present if the folder is part of an ongoing @@ -1558,9 +1724,9 @@ type FolderPendingRenameInfo struct { NullFields []string `json:"-"` } -func (s *FolderPendingRenameInfo) MarshalJSON() ([]byte, error) { +func (s FolderPendingRenameInfo) MarshalJSON() ([]byte, error) { type NoMethod FolderPendingRenameInfo - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Folders: A list of folders. @@ -1590,14 +1756,17 @@ type Folders struct { NullFields []string `json:"-"` } -func (s *Folders) MarshalJSON() ([]byte, error) { +func (s Folders) MarshalJSON() ([]byte, error) { type NoMethod Folders - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleLongrunningListOperationsResponse: The response message for // storage.buckets.operations.list. type GoogleLongrunningListOperationsResponse struct { + // Kind: The kind of item this is. For lists of operations, this is always + // storage#operations. + Kind string `json:"kind,omitempty"` // NextPageToken: The continuation token, used to page through large result // sets. Provide this value in a subsequent request to return the next page of // results. @@ -1608,22 +1777,22 @@ type GoogleLongrunningListOperationsResponse struct { // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "NextPageToken") to - // unconditionally include in API requests. By default, fields with empty or - // default values are omitted from API requests. See + // ForceSendFields is a list of field names (e.g. "Kind") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "NextPageToken") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See + // NullFields is a list of field names (e.g. "Kind") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
NullFields []string `json:"-"` } -func (s *GoogleLongrunningListOperationsResponse) MarshalJSON() ([]byte, error) { +func (s GoogleLongrunningListOperationsResponse) MarshalJSON() ([]byte, error) { type NoMethod GoogleLongrunningListOperationsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleLongrunningOperation: This resource represents a long-running @@ -1635,6 +1804,9 @@ type GoogleLongrunningOperation struct { Done bool `json:"done,omitempty"` // Error: The error result of the operation in case of failure or cancellation. Error *GoogleRpcStatus `json:"error,omitempty"` + // Kind: The kind of item this is. For operations, this is always + // storage#operation. + Kind string `json:"kind,omitempty"` // Metadata: Service-specific metadata associated with the operation. It // typically contains progress information and common metadata such as create // time. Some services might not provide such metadata. Any method that returns @@ -1652,6 +1824,8 @@ type GoogleLongrunningOperation struct { // method name. For example, if the original method name is "TakeSnapshot()", // the inferred response type is "TakeSnapshotResponse". Response googleapi.RawMessage `json:"response,omitempty"` + // SelfLink: The link to this long running operation. + SelfLink string `json:"selfLink,omitempty"` // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` @@ -1668,9 +1842,9 @@ type GoogleLongrunningOperation struct { NullFields []string `json:"-"` } -func (s *GoogleLongrunningOperation) MarshalJSON() ([]byte, error) { +func (s GoogleLongrunningOperation) MarshalJSON() ([]byte, error) { type NoMethod GoogleLongrunningOperation - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleRpcStatus: The "Status" type defines a logical error model that is @@ -1700,9 +1874,9 @@ type GoogleRpcStatus struct { NullFields []string `json:"-"` } -func (s *GoogleRpcStatus) MarshalJSON() ([]byte, error) { +func (s GoogleRpcStatus) MarshalJSON() ([]byte, error) { type NoMethod GoogleRpcStatus - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HmacKey: JSON template to produce a JSON-style HMAC Key resource for Create @@ -1731,9 +1905,9 @@ type HmacKey struct { NullFields []string `json:"-"` } -func (s *HmacKey) MarshalJSON() ([]byte, error) { +func (s HmacKey) MarshalJSON() ([]byte, error) { type NoMethod HmacKey - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HmacKeyMetadata: JSON template to produce a JSON-style HMAC Key metadata @@ -1779,9 +1953,9 @@ type HmacKeyMetadata struct { NullFields []string `json:"-"` } -func (s *HmacKeyMetadata) MarshalJSON() ([]byte, error) { +func (s HmacKeyMetadata) MarshalJSON() ([]byte, error) { type NoMethod HmacKeyMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HmacKeysMetadata: A list of hmacKeys. 
@@ -1811,9 +1985,9 @@ type HmacKeysMetadata struct { NullFields []string `json:"-"` } -func (s *HmacKeysMetadata) MarshalJSON() ([]byte, error) { +func (s HmacKeysMetadata) MarshalJSON() ([]byte, error) { type NoMethod HmacKeysMetadata - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ManagedFolder: A managed folder. @@ -1855,9 +2029,9 @@ type ManagedFolder struct { NullFields []string `json:"-"` } -func (s *ManagedFolder) MarshalJSON() ([]byte, error) { +func (s ManagedFolder) MarshalJSON() ([]byte, error) { type NoMethod ManagedFolder - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ManagedFolders: A list of managed folders. @@ -1887,9 +2061,9 @@ type ManagedFolders struct { NullFields []string `json:"-"` } -func (s *ManagedFolders) MarshalJSON() ([]byte, error) { +func (s ManagedFolders) MarshalJSON() ([]byte, error) { type NoMethod ManagedFolders - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Notification: A subscription to receive Google PubSub notifications. @@ -1934,9 +2108,9 @@ type Notification struct { NullFields []string `json:"-"` } -func (s *Notification) MarshalJSON() ([]byte, error) { +func (s Notification) MarshalJSON() ([]byte, error) { type NoMethod Notification - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Notifications: A list of notification subscriptions. @@ -1962,9 +2136,9 @@ type Notifications struct { NullFields []string `json:"-"` } -func (s *Notifications) MarshalJSON() ([]byte, error) { +func (s Notifications) MarshalJSON() ([]byte, error) { type NoMethod Notifications - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Object: An object. @@ -2104,9 +2278,9 @@ type Object struct { NullFields []string `json:"-"` } -func (s *Object) MarshalJSON() ([]byte, error) { +func (s Object) MarshalJSON() ([]byte, error) { type NoMethod Object - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ObjectCustomerEncryption: Metadata of customer-supplied encryption key, if @@ -2129,9 +2303,9 @@ type ObjectCustomerEncryption struct { NullFields []string `json:"-"` } -func (s *ObjectCustomerEncryption) MarshalJSON() ([]byte, error) { +func (s ObjectCustomerEncryption) MarshalJSON() ([]byte, error) { type NoMethod ObjectCustomerEncryption - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ObjectOwner: The owner of the object. This will always be the uploader of @@ -2154,9 +2328,9 @@ type ObjectOwner struct { NullFields []string `json:"-"` } -func (s *ObjectOwner) MarshalJSON() ([]byte, error) { +func (s ObjectOwner) MarshalJSON() ([]byte, error) { type NoMethod ObjectOwner - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ObjectRetention: A collection of object level retention parameters. 
@@ -2179,9 +2353,9 @@ type ObjectRetention struct { NullFields []string `json:"-"` } -func (s *ObjectRetention) MarshalJSON() ([]byte, error) { +func (s ObjectRetention) MarshalJSON() ([]byte, error) { type NoMethod ObjectRetention - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ObjectAccessControl: An access-control entry. @@ -2242,9 +2416,9 @@ type ObjectAccessControl struct { NullFields []string `json:"-"` } -func (s *ObjectAccessControl) MarshalJSON() ([]byte, error) { +func (s ObjectAccessControl) MarshalJSON() ([]byte, error) { type NoMethod ObjectAccessControl - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ObjectAccessControlProjectTeam: The project team associated with the entity, @@ -2267,9 +2441,9 @@ type ObjectAccessControlProjectTeam struct { NullFields []string `json:"-"` } -func (s *ObjectAccessControlProjectTeam) MarshalJSON() ([]byte, error) { +func (s ObjectAccessControlProjectTeam) MarshalJSON() ([]byte, error) { type NoMethod ObjectAccessControlProjectTeam - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ObjectAccessControls: An access-control list. @@ -2295,9 +2469,9 @@ type ObjectAccessControls struct { NullFields []string `json:"-"` } -func (s *ObjectAccessControls) MarshalJSON() ([]byte, error) { +func (s ObjectAccessControls) MarshalJSON() ([]byte, error) { type NoMethod ObjectAccessControls - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Objects: A list of objects. @@ -2330,9 +2504,9 @@ type Objects struct { NullFields []string `json:"-"` } -func (s *Objects) MarshalJSON() ([]byte, error) { +func (s Objects) MarshalJSON() ([]byte, error) { type NoMethod Objects - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Policy: A bucket/object/managedFolder IAM policy. @@ -2371,9 +2545,9 @@ type Policy struct { NullFields []string `json:"-"` } -func (s *Policy) MarshalJSON() ([]byte, error) { +func (s Policy) MarshalJSON() ([]byte, error) { type NoMethod Policy - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type PolicyBindings struct { @@ -2443,9 +2617,9 @@ type PolicyBindings struct { NullFields []string `json:"-"` } -func (s *PolicyBindings) MarshalJSON() ([]byte, error) { +func (s PolicyBindings) MarshalJSON() ([]byte, error) { type NoMethod PolicyBindings - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RewriteResponse: A rewrite response. 
@@ -2485,9 +2659,9 @@ type RewriteResponse struct { NullFields []string `json:"-"` } -func (s *RewriteResponse) MarshalJSON() ([]byte, error) { +func (s RewriteResponse) MarshalJSON() ([]byte, error) { type NoMethod RewriteResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ServiceAccount: A subscription to receive Google PubSub notifications. @@ -2513,9 +2687,9 @@ type ServiceAccount struct { NullFields []string `json:"-"` } -func (s *ServiceAccount) MarshalJSON() ([]byte, error) { +func (s ServiceAccount) MarshalJSON() ([]byte, error) { type NoMethod ServiceAccount - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TestIamPermissionsResponse: A @@ -2564,9 +2738,9 @@ type TestIamPermissionsResponse struct { NullFields []string `json:"-"` } -func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { +func (s TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { type NoMethod TestIamPermissionsResponse - return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type AnywhereCachesDisableCall struct { @@ -4350,6 +4524,123 @@ func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err return ret, nil } +type BucketsGetStorageLayoutCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetStorageLayout: Returns the storage layout configuration for the specified +// bucket. Note that this operation requires storage.objects.list permission. +// +// - bucket: Name of a bucket. +func (r *BucketsService) GetStorageLayout(bucket string) *BucketsGetStorageLayoutCall { + c := &BucketsGetStorageLayoutCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// Prefix sets the optional parameter "prefix": An optional prefix used for +// permission check. It is useful when the caller only has storage.objects.list +// permission under a specific prefix. +func (c *BucketsGetStorageLayoutCall) Prefix(prefix string) *BucketsGetStorageLayoutCall { + c.urlParams_.Set("prefix", prefix) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *BucketsGetStorageLayoutCall) Fields(s ...googleapi.Field) *BucketsGetStorageLayoutCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *BucketsGetStorageLayoutCall) IfNoneMatch(entityTag string) *BucketsGetStorageLayoutCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *BucketsGetStorageLayoutCall) Context(ctx context.Context) *BucketsGetStorageLayoutCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
+func (c *BucketsGetStorageLayoutCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsGetStorageLayoutCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/storageLayout") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.getStorageLayout" call. +// Any non-2xx status code is an error. Response headers are in either +// *BucketStorageLayout.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *BucketsGetStorageLayoutCall) Do(opts ...googleapi.CallOption) (*BucketStorageLayout, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &BucketStorageLayout{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + type BucketsInsertCall struct { s *Service bucket *Bucket diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go index 2e66d02b378..2f6359f2921 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial.go +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -53,6 +53,9 @@ var logRateLimiter = rate.Sometimes{Interval: 1 * time.Second} // Assign to var for unit test replacement var dialContext = grpc.DialContext +// Assign to var for unit test replacement +var dialContextNewAuth = grpctransport.Dial + // otelStatsHandler is a singleton otelgrpc.clientHandler to be used across // all dial connections to avoid the memory leak documented in // https://github.com/open-telemetry/opentelemetry-go-contrib/issues/4226 @@ -218,20 +221,20 @@ func dialPoolNewAuth(ctx context.Context, secure bool, poolSize int, ds *interna defaultEndpointTemplate = ds.DefaultEndpoint } - pool, err := grpctransport.Dial(ctx, secure, &grpctransport.Options{ + pool, err := dialContextNewAuth(ctx, secure, &grpctransport.Options{ DisableTelemetry: ds.TelemetryDisabled, DisableAuthentication: ds.NoAuth, Endpoint: ds.Endpoint, Metadata: metadata, - GRPCDialOpts: ds.GRPCDialOpts, + GRPCDialOpts: prepareDialOptsNewAuth(ds), PoolSize: poolSize, Credentials: creds, + APIKey: ds.APIKey, DetectOpts: &credentials.DetectOptions{ Scopes: ds.Scopes, Audience: aud, CredentialsFile: ds.CredentialsFile, CredentialsJSON: 
ds.CredentialsJSON, - Client: oauth2.NewClient(ctx, nil), }, InternalOptions: &grpctransport.InternalOptions{ EnableNonDefaultSAForDirectPath: ds.AllowNonDefaultServiceAccount, @@ -248,6 +251,15 @@ func dialPoolNewAuth(ctx context.Context, secure bool, poolSize int, ds *interna return pool, err } +func prepareDialOptsNewAuth(ds *internal.DialSettings) []grpc.DialOption { + var opts []grpc.DialOption + if ds.UserAgent != "" { + opts = append(opts, grpc.WithUserAgent(ds.UserAgent)) + } + + return append(opts, ds.GRPCDialOpts...) +} + func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.ClientConn, error) { if o.HTTPClient != nil { return nil, errors.New("unsupported HTTP client specified") diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index d1cd83b62d9..3747d0df0b2 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -121,7 +121,6 @@ func newClientNewAuth(ctx context.Context, base http.RoundTripper, ds *internal. Audience: aud, CredentialsFile: ds.CredentialsFile, CredentialsJSON: ds.CredentialsJSON, - Client: oauth2.NewClient(ctx, nil), }, InternalOptions: &httptransport.InternalOptions{ EnableJWTWithScope: ds.EnableJwtWithScope, diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go index 191bea48c86..8b462f3dfee 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go @@ -1,4 +1,4 @@ -// Copyright 2015 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v4.24.4 // source: google/api/annotations.proto package annotations diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index 10f35d10e52..fe19e8f97a7 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -1024,6 +1024,13 @@ type MethodSettings struct { // The fully qualified name of the method, for which the options below apply. // This is used to find the method to apply the options. + // + // Example: + // + // publishing: + // method_settings: + // - selector: google.storage.control.v2.StorageControl.CreateFolder + // # method settings for CreateFolder... Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` // Describes settings to use for long-running operations when generating // API methods for RPCs. 
Complements RPCs that use the annotations in @@ -1033,15 +1040,12 @@ type MethodSettings struct { // // publishing: // method_settings: - // - selector: google.cloud.speech.v2.Speech.BatchRecognize - // long_running: - // initial_poll_delay: - // seconds: 60 # 1 minute - // poll_delay_multiplier: 1.5 - // max_poll_delay: - // seconds: 360 # 6 minutes - // total_poll_timeout: - // seconds: 54000 # 90 minutes + // - selector: google.cloud.speech.v2.Speech.BatchRecognize + // long_running: + // initial_poll_delay: 60s # 1 minute + // poll_delay_multiplier: 1.5 + // max_poll_delay: 360s # 6 minutes + // total_poll_timeout: 54000s # 90 minutes LongRunning *MethodSettings_LongRunning `protobuf:"bytes,2,opt,name=long_running,json=longRunning,proto3" json:"long_running,omitempty"` // List of top-level fields of the request message, that should be // automatically populated by the client libraries based on their @@ -1051,9 +1055,9 @@ type MethodSettings struct { // // publishing: // method_settings: - // - selector: google.example.v1.ExampleService.CreateExample - // auto_populated_fields: - // - request_id + // - selector: google.example.v1.ExampleService.CreateExample + // auto_populated_fields: + // - request_id AutoPopulatedFields []string `protobuf:"bytes,3,rep,name=auto_populated_fields,json=autoPopulatedFields,proto3" json:"auto_populated_fields,omitempty"` } diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go index 312d7eb49a9..08505ba3fec 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go index 6ff36206dac..a462e7d0132 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -121,6 +121,11 @@ type FieldInfo struct { // any API consumer, just documents the API's format for the field it is // applied to. Format FieldInfo_Format `protobuf:"varint,1,opt,name=format,proto3,enum=google.api.FieldInfo_Format" json:"format,omitempty"` + // The type(s) that the annotated, generic field may represent. + // + // Currently, this must only be used on fields of type `google.protobuf.Any`. + // Supporting other generic types may be considered in the future. + ReferencedTypes []*TypeReference `protobuf:"bytes,2,rep,name=referenced_types,json=referencedTypes,proto3" json:"referenced_types,omitempty"` } func (x *FieldInfo) Reset() { @@ -162,6 +167,70 @@ func (x *FieldInfo) GetFormat() FieldInfo_Format { return FieldInfo_FORMAT_UNSPECIFIED } +func (x *FieldInfo) GetReferencedTypes() []*TypeReference { + if x != nil { + return x.ReferencedTypes + } + return nil +} + +// A reference to a message type, for use in [FieldInfo][google.api.FieldInfo]. 
+type TypeReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the type that the annotated, generic field may represent. + // If the type is in the same protobuf package, the value can be the simple + // message name e.g., `"MyMessage"`. Otherwise, the value must be the + // fully-qualified message name e.g., `"google.library.v1.Book"`. + // + // If the type(s) are unknown to the service (e.g. the field accepts generic + // user input), use the wildcard `"*"` to denote this behavior. + // + // See [AIP-202](https://google.aip.dev/202#type-references) for more details. + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` +} + +func (x *TypeReference) Reset() { + *x = TypeReference{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_field_info_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TypeReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TypeReference) ProtoMessage() {} + +func (x *TypeReference) ProtoReflect() protoreflect.Message { + mi := &file_google_api_field_info_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TypeReference.ProtoReflect.Descriptor instead. +func (*TypeReference) Descriptor() ([]byte, []int) { + return file_google_api_field_info_proto_rawDescGZIP(), []int{1} +} + +func (x *TypeReference) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + var file_google_api_field_info_proto_extTypes = []protoimpl.ExtensionInfo{ { ExtendedType: (*descriptorpb.FieldOptions)(nil), @@ -185,6 +254,13 @@ var ( // string actual_ip_address = 4 [ // (google.api.field_info).format = IPV4_OR_IPV6 // ]; + // google.protobuf.Any generic_field = 5 [ + // (google.api.field_info).referenced_types = {type_name: "ActualType"}, + // (google.api.field_info).referenced_types = {type_name: "OtherType"}, + // ]; + // google.protobuf.Any generic_user_input = 5 [ + // (google.api.field_info).referenced_types = {type_name: "*"}, + // ]; // // optional google.api.FieldInfo field_info = 291403980; E_FieldInfo = &file_google_api_field_info_proto_extTypes[0] @@ -197,30 +273,37 @@ var file_google_api_field_info_proto_rawDesc = []byte{ 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a, 0x09, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xda, 0x01, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x34, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, - 0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, - 0x51, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x16, 0x0a, 0x12, 0x46, 0x4f, 0x52, - 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 
0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x55, 0x49, 0x44, 0x34, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, - 0x49, 0x50, 0x56, 0x34, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x56, 0x36, 0x10, 0x03, - 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x50, 0x56, 0x34, 0x5f, 0x4f, 0x52, 0x5f, 0x49, 0x50, 0x56, 0x36, - 0x10, 0x04, 0x3a, 0x57, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, - 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0xcc, 0xf1, 0xf9, 0x8a, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, - 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x6c, 0x0a, 0x0e, 0x63, - 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x44, 0x0a, 0x10, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x64, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x64, + 0x54, 0x79, 0x70, 0x65, 0x73, 0x22, 0x51, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x16, 0x0a, 0x12, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x55, 0x49, 0x44, 0x34, + 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x56, 0x34, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, + 0x49, 0x50, 0x56, 0x36, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x50, 0x56, 0x34, 0x5f, 0x4f, + 0x52, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x04, 0x22, 0x2c, 0x0a, 0x0d, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, + 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x3a, 0x57, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, + 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0xcc, 0xf1, 0xf9, 0x8a, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42, + 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 
0x65, 0x2e, 0x61, 0x70, + 0x69, 0x42, 0x0e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, + 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -236,21 +319,23 @@ func file_google_api_field_info_proto_rawDescGZIP() []byte { } var file_google_api_field_info_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_google_api_field_info_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_api_field_info_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_google_api_field_info_proto_goTypes = []interface{}{ (FieldInfo_Format)(0), // 0: google.api.FieldInfo.Format (*FieldInfo)(nil), // 1: google.api.FieldInfo - (*descriptorpb.FieldOptions)(nil), // 2: google.protobuf.FieldOptions + (*TypeReference)(nil), // 2: google.api.TypeReference + (*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions } var file_google_api_field_info_proto_depIdxs = []int32{ 0, // 0: google.api.FieldInfo.format:type_name -> google.api.FieldInfo.Format - 2, // 1: google.api.field_info:extendee -> google.protobuf.FieldOptions - 1, // 2: google.api.field_info:type_name -> google.api.FieldInfo - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 2, // [2:3] is the sub-list for extension type_name - 1, // [1:2] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 2, // 1: google.api.FieldInfo.referenced_types:type_name -> google.api.TypeReference + 3, // 2: google.api.field_info:extendee -> google.protobuf.FieldOptions + 1, // 3: google.api.field_info:type_name -> google.api.FieldInfo + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 3, // [3:4] is the sub-list for extension type_name + 2, // [2:3] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } func init() { file_google_api_field_info_proto_init() } @@ -271,6 +356,18 @@ func file_google_api_field_info_proto_init() { return nil } } + file_google_api_field_info_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TypeReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -278,7 +375,7 @@ func file_google_api_field_info_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_api_field_info_proto_rawDesc, NumEnums: 1, - NumMessages: 1, + NumMessages: 2, NumExtensions: 1, NumServices: 0, }, diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go index 8a0e1c345b9..ffb5838cb18 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, 
Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc v4.24.4 // source: google/api/http.proto package annotations @@ -102,7 +102,7 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { return false } -// # gRPC Transcoding +// gRPC Transcoding // // gRPC Transcoding is a feature for mapping between a gRPC method and one or // more HTTP REST endpoints. It allows developers to build a single API service @@ -143,9 +143,8 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // This enables an HTTP REST to gRPC mapping as below: // -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` +// - HTTP: `GET /v1/messages/123456` +// - gRPC: `GetMessage(name: "messages/123456")` // // Any fields in the request message which are not bound by the path template // automatically become HTTP query parameters if there is no HTTP request body. @@ -169,11 +168,9 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // This enables a HTTP JSON to RPC mapping as below: // -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | -// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: -// "foo"))` +// - HTTP: `GET /v1/messages/123456?revision=2&sub.subfield=foo` +// - gRPC: `GetMessage(message_id: "123456" revision: 2 sub: +// SubMessage(subfield: "foo"))` // // Note that fields which are mapped to URL query parameters must have a // primitive type or a repeated primitive type or a non-repeated message type. @@ -203,10 +200,8 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // representation of the JSON in the request body is determined by // protos JSON encoding: // -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -// "123456" message { text: "Hi!" })` +// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }` +// - gRPC: `UpdateMessage(message_id: "123456" message { text: "Hi!" })` // // The special name `*` can be used in the body mapping to define that // every field not bound by the path template should be mapped to the @@ -228,10 +223,8 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // The following HTTP JSON to RPC mapping is enabled: // -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -// "123456" text: "Hi!")` +// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }` +// - gRPC: `UpdateMessage(message_id: "123456" text: "Hi!")` // // Note that when using `*` in the body mapping, it is not possible to // have HTTP parameters, as all fields not bound by the path end in @@ -259,13 +252,13 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // This enables the following two alternative HTTP JSON to RPC mappings: // -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` -// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: -// "123456")` +// - HTTP: `GET /v1/messages/123456` +// - gRPC: `GetMessage(message_id: "123456")` // -// ## Rules for HTTP mapping +// - HTTP: `GET /v1/users/me/messages/123456` +// - gRPC: `GetMessage(user_id: "me" message_id: "123456")` +// +// # Rules for HTTP mapping // // 1. 
Leaf request fields (recursive expansion nested messages in the request // message) are classified into three categories: @@ -284,7 +277,7 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // request body, all // fields are passed via URL path and URL query parameters. // -// ### Path template syntax +// Path template syntax // // Template = "/" Segments [ Verb ] ; // Segments = Segment { "/" Segment } ; @@ -323,7 +316,7 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // Document](https://developers.google.com/discovery/v1/reference/apis) as // `{+var}`. // -// ## Using gRPC API Service Configuration +// # Using gRPC API Service Configuration // // gRPC API Service Configuration (service config) is a configuration language // for configuring a gRPC service to become a user-facing product. The @@ -338,15 +331,14 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // specified in the service config will override any matching transcoding // configuration in the proto. // -// Example: +// The following example selects a gRPC method and applies an `HttpRule` to it: // // http: // rules: -// # Selects a gRPC method and applies HttpRule to it. // - selector: example.v1.Messaging.GetMessage // get: /v1/messages/{message_id}/{sub.subfield} // -// ## Special notes +// # Special notes // // When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the // proto to JSON conversion must follow the [proto3 diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go index bbcc12d29c8..b5db279aebf 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc v4.24.4 // source: google/api/resource.proto package annotations @@ -253,8 +253,13 @@ type ResourceDescriptor struct { History ResourceDescriptor_History `protobuf:"varint,4,opt,name=history,proto3,enum=google.api.ResourceDescriptor_History" json:"history,omitempty"` // The plural name used in the resource name and permission names, such as // 'projects' for the resource name of 'projects/{project}' and the permission - // name of 'cloudresourcemanager.googleapis.com/projects.get'. It is the same - // concept of the `plural` field in k8s CRD spec + // name of 'cloudresourcemanager.googleapis.com/projects.get'. One exception + // to this is for Nested Collections that have stuttering names, as defined + // in [AIP-122](https://google.aip.dev/122#nested-collections), where the + // collection ID in the resource name pattern does not necessarily directly + // match the `plural` value. + // + // It is the same concept of the `plural` field in k8s CRD spec // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ // // Note: The plural form is required even for singleton resources. 
See diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go index 9a9ae04c29a..1d8397b02b4 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc v4.24.4 // source: google/api/routing.proto package annotations diff --git a/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go b/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go index aa640dc31cd..6e01be017c8 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc v4.24.4 // source: google/api/distribution.proto package distribution diff --git a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/checked.pb.go b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/checked.pb.go index 137ff5b1e94..9f81dbcd86e 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/checked.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/checked.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/eval.pb.go b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/eval.pb.go index ca4415956f0..0a2ffb5955d 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/eval.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/eval.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/explain.pb.go b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/explain.pb.go index 3f994b4e3e0..57aaa2c9f51 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/explain.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/explain.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
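The rewritten http.pb.go comments above describe how google.api.http annotations map gRPC methods to REST endpoints. A minimal sketch of reading such an HttpRule from a method descriptor with the annotations package touched by this diff (the helper name httpRuleFor is illustrative; additional_bindings and error handling are omitted):

package example

import (
	"google.golang.org/genproto/googleapis/api/annotations"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/descriptorpb"
)

// httpRuleFor returns the google.api.http rule attached to a method
// descriptor, or nil if the method carries no HTTP mapping.
func httpRuleFor(m protoreflect.MethodDescriptor) *annotations.HttpRule {
	opts, ok := m.Options().(*descriptorpb.MethodOptions)
	if !ok || opts == nil {
		return nil
	}
	rule, _ := proto.GetExtension(opts, annotations.E_Http).(*annotations.HttpRule)
	return rule
}

For a method annotated with get: "/v1/messages/{message_id}", the returned rule would report that path from rule.GetGet().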
diff --git a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go index 0d718fc36d1..6b867a46ede 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/value.pb.go b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/value.pb.go index 033f238684f..0a5ca6a1b9f 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/value.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/value.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go index 3543268f84e..e7d3805e365 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc v4.24.4 // source: google/api/httpbody.proto package httpbody diff --git a/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go b/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go index 75397e1b14f..42bcacc3635 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc v4.24.4 // source: google/api/label.proto package label diff --git a/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go index 454948669dc..498020e33cd 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc v4.24.4 // source: google/api/launch_stage.proto package api diff --git a/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go index 2af7d129ba6..d4b89c98d19 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc v4.24.4 // source: google/api/metric.proto package metric diff --git a/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go index b66442b1862..b4cee29803c 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/location/locations.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/location/locations.pb.go new file mode 100644 index 00000000000..db70a101b87 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/location/locations.pb.go @@ -0,0 +1,631 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.12.2 +// source: google/cloud/location/locations.proto + +package location + +import ( + context "context" + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The request message for [Locations.ListLocations][google.cloud.location.Locations.ListLocations]. 
+type ListLocationsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resource that owns the locations collection, if applicable. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The standard list filter. + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // The standard list page size. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // The standard list page token. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListLocationsRequest) Reset() { + *x = ListLocationsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_location_locations_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListLocationsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListLocationsRequest) ProtoMessage() {} + +func (x *ListLocationsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_location_locations_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListLocationsRequest.ProtoReflect.Descriptor instead. +func (*ListLocationsRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_location_locations_proto_rawDescGZIP(), []int{0} +} + +func (x *ListLocationsRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ListLocationsRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListLocationsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListLocationsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The response message for [Locations.ListLocations][google.cloud.location.Locations.ListLocations]. +type ListLocationsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A list of locations that matches the specified filter in the request. + Locations []*Location `protobuf:"bytes,1,rep,name=locations,proto3" json:"locations,omitempty"` + // The standard List next-page token. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListLocationsResponse) Reset() { + *x = ListLocationsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_location_locations_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListLocationsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListLocationsResponse) ProtoMessage() {} + +func (x *ListLocationsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_location_locations_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListLocationsResponse.ProtoReflect.Descriptor instead. 
+func (*ListLocationsResponse) Descriptor() ([]byte, []int) { + return file_google_cloud_location_locations_proto_rawDescGZIP(), []int{1} +} + +func (x *ListLocationsResponse) GetLocations() []*Location { + if x != nil { + return x.Locations + } + return nil +} + +func (x *ListLocationsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// The request message for [Locations.GetLocation][google.cloud.location.Locations.GetLocation]. +type GetLocationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Resource name for the location. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetLocationRequest) Reset() { + *x = GetLocationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_location_locations_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetLocationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetLocationRequest) ProtoMessage() {} + +func (x *GetLocationRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_location_locations_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetLocationRequest.ProtoReflect.Descriptor instead. +func (*GetLocationRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_location_locations_proto_rawDescGZIP(), []int{2} +} + +func (x *GetLocationRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// A resource that represents Google Cloud Platform location. +type Location struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Resource name for the location, which may vary between implementations. + // For example: `"projects/example-project/locations/us-east1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The canonical id for this location. For example: `"us-east1"`. + LocationId string `protobuf:"bytes,4,opt,name=location_id,json=locationId,proto3" json:"location_id,omitempty"` + // The friendly name for this location, typically a nearby city name. + // For example, "Tokyo". + DisplayName string `protobuf:"bytes,5,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // Cross-service attributes for the location. For example + // + // {"cloud.googleapis.com/region": "us-east1"} + Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Service-specific metadata. For example the available capacity at the given + // location. 
+ Metadata *anypb.Any `protobuf:"bytes,3,opt,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (x *Location) Reset() { + *x = Location{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_location_locations_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Location) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Location) ProtoMessage() {} + +func (x *Location) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_location_locations_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Location.ProtoReflect.Descriptor instead. +func (*Location) Descriptor() ([]byte, []int) { + return file_google_cloud_location_locations_proto_rawDescGZIP(), []int{3} +} + +func (x *Location) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Location) GetLocationId() string { + if x != nil { + return x.LocationId + } + return "" +} + +func (x *Location) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *Location) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *Location) GetMetadata() *anypb.Any { + if x != nil { + return x.Metadata + } + return nil +} + +var File_google_cloud_location_locations_proto protoreflect.FileDescriptor + +var file_google_cloud_location_locations_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x1c, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x7e, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, + 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x22, 0x7e, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x09, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 
0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, + 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x22, 0x28, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x94, 0x02, 0x0a, 0x08, 0x4c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, + 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x43, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x12, 0x30, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x08, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x32, 0xa4, 0x03, 0x0a, 0x09, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0xab, 0x01, 0x0a, 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3f, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x39, 0x12, 0x14, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x7d, 0x5a, 0x21, 0x12, 0x1f, 0x2f, 0x76, + 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 
0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x9e, 0x01, + 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x43, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x3d, 0x12, 0x16, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x5a, 0x23, 0x12, 0x21, 0x2f, 0x76, 0x31, + 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x1a, 0x48, + 0xca, 0x41, 0x14, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, + 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, + 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x6f, 0x0a, 0x19, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_google_cloud_location_locations_proto_rawDescOnce sync.Once + file_google_cloud_location_locations_proto_rawDescData = file_google_cloud_location_locations_proto_rawDesc +) + +func file_google_cloud_location_locations_proto_rawDescGZIP() []byte { + file_google_cloud_location_locations_proto_rawDescOnce.Do(func() { + file_google_cloud_location_locations_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_cloud_location_locations_proto_rawDescData) + }) + return file_google_cloud_location_locations_proto_rawDescData +} + +var file_google_cloud_location_locations_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_google_cloud_location_locations_proto_goTypes = []interface{}{ + (*ListLocationsRequest)(nil), // 0: google.cloud.location.ListLocationsRequest + (*ListLocationsResponse)(nil), // 1: google.cloud.location.ListLocationsResponse + (*GetLocationRequest)(nil), // 2: google.cloud.location.GetLocationRequest + (*Location)(nil), // 3: google.cloud.location.Location + nil, // 4: google.cloud.location.Location.LabelsEntry + (*anypb.Any)(nil), // 5: google.protobuf.Any +} +var file_google_cloud_location_locations_proto_depIdxs = []int32{ + 3, // 0: google.cloud.location.ListLocationsResponse.locations:type_name -> 
google.cloud.location.Location + 4, // 1: google.cloud.location.Location.labels:type_name -> google.cloud.location.Location.LabelsEntry + 5, // 2: google.cloud.location.Location.metadata:type_name -> google.protobuf.Any + 0, // 3: google.cloud.location.Locations.ListLocations:input_type -> google.cloud.location.ListLocationsRequest + 2, // 4: google.cloud.location.Locations.GetLocation:input_type -> google.cloud.location.GetLocationRequest + 1, // 5: google.cloud.location.Locations.ListLocations:output_type -> google.cloud.location.ListLocationsResponse + 3, // 6: google.cloud.location.Locations.GetLocation:output_type -> google.cloud.location.Location + 5, // [5:7] is the sub-list for method output_type + 3, // [3:5] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_google_cloud_location_locations_proto_init() } +func file_google_cloud_location_locations_proto_init() { + if File_google_cloud_location_locations_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_cloud_location_locations_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListLocationsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_location_locations_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListLocationsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_location_locations_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetLocationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_location_locations_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Location); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_cloud_location_locations_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_cloud_location_locations_proto_goTypes, + DependencyIndexes: file_google_cloud_location_locations_proto_depIdxs, + MessageInfos: file_google_cloud_location_locations_proto_msgTypes, + }.Build() + File_google_cloud_location_locations_proto = out.File + file_google_cloud_location_locations_proto_rawDesc = nil + file_google_cloud_location_locations_proto_goTypes = nil + file_google_cloud_location_locations_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// LocationsClient is the client API for Locations service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type LocationsClient interface { + // Lists information about the supported locations for this service. + ListLocations(ctx context.Context, in *ListLocationsRequest, opts ...grpc.CallOption) (*ListLocationsResponse, error) + // Gets information about a location. + GetLocation(ctx context.Context, in *GetLocationRequest, opts ...grpc.CallOption) (*Location, error) +} + +type locationsClient struct { + cc grpc.ClientConnInterface +} + +func NewLocationsClient(cc grpc.ClientConnInterface) LocationsClient { + return &locationsClient{cc} +} + +func (c *locationsClient) ListLocations(ctx context.Context, in *ListLocationsRequest, opts ...grpc.CallOption) (*ListLocationsResponse, error) { + out := new(ListLocationsResponse) + err := c.cc.Invoke(ctx, "/google.cloud.location.Locations/ListLocations", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *locationsClient) GetLocation(ctx context.Context, in *GetLocationRequest, opts ...grpc.CallOption) (*Location, error) { + out := new(Location) + err := c.cc.Invoke(ctx, "/google.cloud.location.Locations/GetLocation", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// LocationsServer is the server API for Locations service. +type LocationsServer interface { + // Lists information about the supported locations for this service. + ListLocations(context.Context, *ListLocationsRequest) (*ListLocationsResponse, error) + // Gets information about a location. + GetLocation(context.Context, *GetLocationRequest) (*Location, error) +} + +// UnimplementedLocationsServer can be embedded to have forward compatible implementations. +type UnimplementedLocationsServer struct { +} + +func (*UnimplementedLocationsServer) ListLocations(context.Context, *ListLocationsRequest) (*ListLocationsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListLocations not implemented") +} +func (*UnimplementedLocationsServer) GetLocation(context.Context, *GetLocationRequest) (*Location, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetLocation not implemented") +} + +func RegisterLocationsServer(s *grpc.Server, srv LocationsServer) { + s.RegisterService(&_Locations_serviceDesc, srv) +} + +func _Locations_ListLocations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListLocationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LocationsServer).ListLocations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.location.Locations/ListLocations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LocationsServer).ListLocations(ctx, req.(*ListLocationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Locations_GetLocation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetLocationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LocationsServer).GetLocation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.location.Locations/GetLocation", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LocationsServer).GetLocation(ctx, req.(*GetLocationRequest)) + } + return 
interceptor(ctx, in, info, handler) +} + +var _Locations_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.location.Locations", + HandlerType: (*LocationsServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListLocations", + Handler: _Locations_ListLocations_Handler, + }, + { + MethodName: "GetLocation", + Handler: _Locations_GetLocation_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/cloud/location/locations.proto", +} diff --git a/vendor/google.golang.org/genproto/googleapis/type/calendarperiod/calendar_period.pb.go b/vendor/google.golang.org/genproto/googleapis/type/calendarperiod/calendar_period.pb.go index 54de5458c6c..cae02bce14f 100644 --- a/vendor/google.golang.org/genproto/googleapis/type/calendarperiod/calendar_period.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/type/calendarperiod/calendar_period.pb.go @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v4.24.4 // source: google/type/calendar_period.proto package calendarperiod diff --git a/vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go b/vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go index 72afd8b000e..c7bef08aadd 100644 --- a/vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v4.24.4 // source: google/type/date.proto package date diff --git a/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go b/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go index 38ef56f73ca..7d57f34b4f5 100644 --- a/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v4.24.4 // source: google/type/expr.proto package expr diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index 608aa6e1ac5..0854d298e41 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -66,7 +66,7 @@ How to get your contributions merged smoothly and quickly. - **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on. 
- - `VET_SKIP_PROTO=1 ./vet.sh` to catch vet errors + - `./scripts/vet.sh` to catch vet errors - `go test -cpu 1,4 -timeout 7m ./...` to run the tests - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md index c6672c0a3ef..6a8a07781ae 100644 --- a/vendor/google.golang.org/grpc/MAINTAINERS.md +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -9,6 +9,7 @@ for general contribution guidelines. ## Maintainers (in alphabetical order) +- [atollena](https://github.com/atollena), Datadog, Inc. - [cesarghali](https://github.com/cesarghali), Google LLC - [dfawley](https://github.com/dfawley), Google LLC - [easwars](https://github.com/easwars), Google LLC diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile index 1f8960922b3..be38384ff6f 100644 --- a/vendor/google.golang.org/grpc/Makefile +++ b/vendor/google.golang.org/grpc/Makefile @@ -30,17 +30,20 @@ testdeps: GO111MODULE=on go get -d -v -t google.golang.org/grpc/... vet: vetdeps - ./vet.sh + ./scripts/vet.sh vetdeps: - ./vet.sh -install + ./scripts/vet.sh -install .PHONY: \ all \ build \ clean \ + deps \ proto \ test \ + testsubmodule \ testrace \ + testdeps \ vet \ vetdeps diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index ab0fbb79b86..b572707c623 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -10,7 +10,7 @@ RPC framework that puts mobile and HTTP/2 first. For more information see the ## Prerequisites -- **[Go][]**: any one of the **three latest major** [releases][go-releases]. +- **[Go][]**: any one of the **two latest major** [releases][go-releases]. ## Installation diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go index 32989b3abb2..0adc98866c0 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -19,7 +19,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 +// protoc-gen-go v1.34.1 // protoc v4.25.2 // source: grpc/lb/v1/load_balancer.proto diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go index d8ec6539d2a..57a792a7b48 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go @@ -19,7 +19,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 +// - protoc-gen-go-grpc v1.4.0 // - protoc v4.25.2 // source: grpc/lb/v1/load_balancer.proto @@ -34,8 +34,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.62.0 or later. 
+const _ = grpc.SupportPackageIsVersion8 const ( LoadBalancer_BalanceLoad_FullMethodName = "/grpc.lb.v1.LoadBalancer/BalanceLoad" @@ -58,11 +58,12 @@ func NewLoadBalancerClient(cc grpc.ClientConnInterface) LoadBalancerClient { } func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) { - stream, err := c.cc.NewStream(ctx, &LoadBalancer_ServiceDesc.Streams[0], LoadBalancer_BalanceLoad_FullMethodName, opts...) + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &LoadBalancer_ServiceDesc.Streams[0], LoadBalancer_BalanceLoad_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &loadBalancerBalanceLoadClient{stream} + x := &loadBalancerBalanceLoadClient{ClientStream: stream} return x, nil } @@ -116,7 +117,7 @@ func RegisterLoadBalancerServer(s grpc.ServiceRegistrar, srv LoadBalancerServer) } func _LoadBalancer_BalanceLoad_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(LoadBalancerServer).BalanceLoad(&loadBalancerBalanceLoadServer{stream}) + return srv.(LoadBalancerServer).BalanceLoad(&loadBalancerBalanceLoadServer{ServerStream: stream}) } type LoadBalancer_BalanceLoadServer interface { diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go index 8942c31310a..96a57c8c70c 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go @@ -21,14 +21,14 @@ package grpclb import ( "encoding/json" - "google.golang.org/grpc" + "google.golang.org/grpc/balancer/pickfirst" "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/serviceconfig" ) const ( roundRobinName = roundrobin.Name - pickFirstName = grpc.PickFirstBalancerName + pickFirstName = pickfirst.Name ) type grpclbServiceConfig struct { diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go index 20c5f2ec396..671bc663fcb 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go @@ -19,13 +19,13 @@ package grpclb import ( + "math/rand" "sync" "sync/atomic" "google.golang.org/grpc/balancer" lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/status" ) @@ -112,7 +112,7 @@ type rrPicker struct { func newRRPicker(readySCs []balancer.SubConn) *rrPicker { return &rrPicker{ subConns: readySCs, - subConnsNext: grpcrand.Intn(len(readySCs)), + subConnsNext: rand.Intn(len(readySCs)), } } @@ -147,7 +147,7 @@ func newLBPicker(serverList []*lbpb.Server, readySCs []balancer.SubConn, stats * return &lbPicker{ serverList: serverList, subConns: readySCs, - subConnsNext: grpcrand.Intn(len(readySCs)), + subConnsNext: rand.Intn(len(readySCs)), stats: stats, } } diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go similarity index 74% rename from vendor/google.golang.org/grpc/pickfirst.go rename to vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index e3ea42ba962..07527603f1d 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -16,26 +16,36 @@ * */ -package grpc +// Package pickfirst contains the 
pick_first load balancing policy. +package pickfirst import ( "encoding/json" "errors" "fmt" + "math/rand" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" internalgrpclog "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) +func init() { + balancer.Register(pickfirstBuilder{}) + internal.ShuffleAddressListForTesting = func(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } +} + +var logger = grpclog.Component("pick-first-lb") + const ( - // PickFirstBalancerName is the name of the pick_first balancer. - PickFirstBalancerName = "pick_first" - logPrefix = "[pick-first-lb %p] " + // Name is the name of the pick_first balancer. + Name = "pick_first" + logPrefix = "[pick-first-lb %p] " ) type pickfirstBuilder struct{} @@ -47,14 +57,14 @@ func (pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) } func (pickfirstBuilder) Name() string { - return PickFirstBalancerName + return Name } type pfConfig struct { serviceconfig.LoadBalancingConfig `json:"-"` // If set to true, instructs the LB policy to shuffle the order of the list - // of addresses received from the name resolver before attempting to + // of endpoints received from the name resolver before attempting to // connect to them. ShuffleAddressList bool `json:"shuffleAddressList"` } @@ -93,9 +103,14 @@ func (b *pickfirstBalancer) ResolverError(err error) { }) } +type Shuffler interface { + ShuffleAddressListForTesting(n int, swap func(i, j int)) +} + +func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } + func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { - addrs := state.ResolverState.Addresses - if len(addrs) == 0 { + if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { // The resolver reported an empty address list. Treat it like an error by // calling b.ResolverError. if b.subConn != nil { @@ -107,22 +122,49 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } - // We don't have to guard this block with the env var because ParseConfig // already does so. cfg, ok := state.BalancerConfig.(pfConfig) if state.BalancerConfig != nil && !ok { return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) } - if cfg.ShuffleAddressList { - addrs = append([]resolver.Address{}, addrs...) - grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) - } if b.logger.V(2) { b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) } + var addrs []resolver.Address + if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 { + // Perform the optional shuffling described in gRFC A62. The shuffling will + // change the order of endpoints but not touch the order of the addresses + // within each endpoint. - A61 + if cfg.ShuffleAddressList { + endpoints = append([]resolver.Endpoint{}, endpoints...) 
+ internal.ShuffleAddressListForTesting.(func(int, func(int, int)))(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) + } + + // "Flatten the list by concatenating the ordered list of addresses for each + // of the endpoints, in order." - A61 + for _, endpoint := range endpoints { + // "In the flattened list, interleave addresses from the two address + // families, as per RFC-8304 section 4." - A61 + // TODO: support the above language. + addrs = append(addrs, endpoint.Addresses...) + } + } else { + // Endpoints not set, process addresses until we migrate resolver + // emissions fully to Endpoints. The top channel does wrap emitted + // addresses with endpoints, however some balancers such as weighted + // target do not forwarrd the corresponding correct endpoints down/split + // endpoints properly. Once all balancers correctly forward endpoints + // down, can delete this else conditional. + addrs = state.ResolverState.Addresses + if cfg.ShuffleAddressList { + addrs = append([]resolver.Address{}, addrs...) + rand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + } + } + if b.subConn != nil { b.cc.UpdateAddresses(b.subConn, addrs) return nil diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index f7031ad2251..260255d31b6 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -22,12 +22,12 @@ package roundrobin import ( + "math/rand" "sync/atomic" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/grpcrand" ) // Name is the name of round_robin balancer. @@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { // Start at a random index, as the same RR balancer rebuilds a new // picker when SubConn states change, and we don't want to apply excess // load to the first server in the list. - next: uint32(grpcrand.Intn(len(scs))), + next: uint32(rand.Intn(len(scs))), } } diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go index af39b8a4c73..4161fdf47a8 100644 --- a/vendor/google.golang.org/grpc/balancer_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -198,6 +198,10 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { ccb.cc.mu.Lock() defer ccb.cc.mu.Unlock() + if ccb.cc.conns == nil { + // The CC has been closed; ignore this update. + return + } ccb.mu.Lock() if ccb.closed { diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 856c75dd4e2..63c639e4fe9 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 +// protoc-gen-go v1.34.1 // protoc v4.25.2 // source: grpc/binlog/v1/binarylog.proto diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index c7f2607114a..423be7b43b0 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -31,13 +31,13 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/balancer/pickfirst" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/idle" - "google.golang.org/grpc/internal/pretty" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" @@ -73,6 +73,8 @@ var ( // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default // service config. invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" + // PickFirstBalancerName is the name of the pick_first balancer. + PickFirstBalancerName = pickfirst.Name ) // The following errors are returned from Dial and DialContext @@ -121,8 +123,9 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires // https://github.com/grpc/grpc/blob/master/doc/naming.md. e.g. to use dns // resolver, a "dns:///" prefix should be applied to the target. // -// The DialOptions returned by WithBlock, WithTimeout, and -// WithReturnConnectionError are ignored by this function. +// The DialOptions returned by WithBlock, WithTimeout, +// WithReturnConnectionError, and FailOnNonTempDialError are ignored by this +// function. func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ target: target, @@ -152,6 +155,16 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) for _, opt := range opts { opt.apply(&cc.dopts) } + + // Determine the resolver to use. + if err := cc.initParsedTargetAndResolverBuilder(); err != nil { + return nil, err + } + + for _, opt := range globalPerTargetDialOptions { + opt.DialOptionForTarget(cc.parsedTarget.URL).apply(&cc.dopts) + } + chainUnaryClientInterceptors(cc) chainStreamClientInterceptors(cc) @@ -160,7 +173,7 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) } if cc.dopts.defaultServiceConfigRawJSON != nil { - scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON) + scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON, cc.dopts.maxCallAttempts) if scpr.Err != nil { return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err) } @@ -168,25 +181,16 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) } cc.mkp = cc.dopts.copts.KeepaliveParams - // Register ClientConn with channelz. - cc.channelzRegistration(target) - - // TODO: Ideally it should be impossible to error from this function after - // channelz registration. This will require removing some channelz logs - // from the following functions that can error. Errors can be returned to - // the user, and successful logs can be emitted here, after the checks have - // passed and channelz is subsequently registered. - - // Determine the resolver to use. 
- if err := cc.parseTargetAndFindResolver(); err != nil { - channelz.RemoveEntry(cc.channelz.ID) - return nil, err - } - if err = cc.determineAuthority(); err != nil { - channelz.RemoveEntry(cc.channelz.ID) + if err = cc.initAuthority(); err != nil { return nil, err } + // Register ClientConn with channelz. Note that this is only done after + // channel creation cannot fail. + cc.channelzRegistration(target) + channelz.Infof(logger, cc.channelz, "parsed dial target is: %#v", cc.parsedTarget) + channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority) + cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz) cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) @@ -196,6 +200,8 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) } // Dial calls DialContext(context.Background(), target, opts...). +// +// Deprecated: use NewClient instead. Will be supported throughout 1.x. func Dial(target string, opts ...DialOption) (*ClientConn, error) { return DialContext(context.Background(), target, opts...) } @@ -209,6 +215,8 @@ func Dial(target string, opts ...DialOption) (*ClientConn, error) { // "passthrough" for backward compatibility. This distinction should not matter // to most users, but could matter to legacy users that specify a custom dialer // and expect it to receive the target string directly. +// +// Deprecated: use NewClient instead. Will be supported throughout 1.x. func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { // At the end of this method, we kick the channel out of idle, rather than // waiting for the first rpc. @@ -583,11 +591,11 @@ type ClientConn struct { // The following are initialized at dial time, and are read-only after that. target string // User's dial target. - parsedTarget resolver.Target // See parseTargetAndFindResolver(). - authority string // See determineAuthority(). + parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder(). + authority string // See initAuthority(). dopts dialOptions // Default and user specified dial options. channelz *channelz.Channel // Channelz object. - resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). + resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder(). idlenessMgr *idle.Manager // The following provide their own synchronization, and therefore don't @@ -688,8 +696,7 @@ func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { var emptyServiceConfig *ServiceConfig func init() { - balancer.Register(pickfirstBuilder{}) - cfg := parseServiceConfig("{}") + cfg := parseServiceConfig("{}", defaultMaxCallAttempts) if cfg.Err != nil { panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) } @@ -838,6 +845,9 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer. stateChan: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) + // Start with our address set to the first address; this may be updated if + // we connect to different addresses. + ac.channelz.ChannelMetrics.Target.Store(&addrs[0].Addr) channelz.AddTraceEvent(logger, ac.channelz, 0, &channelz.TraceEvent{ Desc: "Subchannel created", @@ -929,10 +939,14 @@ func equalAddresses(a, b []resolver.Address) bool { // updateAddrs updates ac.addrs with the new addresses list and handles active // connections or connection attempts. 
func (ac *addrConn) updateAddrs(addrs []resolver.Address) { - ac.mu.Lock() - channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs curAddr: %v, addrs: %v", pretty.ToJSON(ac.curAddr), pretty.ToJSON(addrs)) - addrs = copyAddressesWithoutBalancerAttributes(addrs) + limit := len(addrs) + if limit > 5 { + limit = 5 + } + channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs addrs (%d of %d): %v", limit, len(addrs), addrs[:limit]) + + ac.mu.Lock() if equalAddresses(ac.addrs, addrs) { ac.mu.Unlock() return @@ -1167,6 +1181,10 @@ type addrConn struct { // is received, transport is closed, ac has been torn down). transport transport.ClientTransport // The current transport. + // This mutex is used on the RPC path, so its usage should be minimized as + // much as possible. + // TODO: Find a lock-free way to retrieve the transport and state from the + // addrConn. mu sync.Mutex curAddr resolver.Address // The current address. addrs []resolver.Address // All addresses that the resolver resolved to. @@ -1292,6 +1310,7 @@ func (ac *addrConn) resetTransport() { func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error { var firstConnErr error for _, addr := range addrs { + ac.channelz.ChannelMetrics.Target.Store(&addr.Addr) if ctx.Err() != nil { return errConnClosing } @@ -1657,22 +1676,19 @@ func (cc *ClientConn) connectionError() error { return cc.lastConnectionError } -// parseTargetAndFindResolver parses the user's dial target and stores the -// parsed target in `cc.parsedTarget`. +// initParsedTargetAndResolverBuilder parses the user's dial target and stores +// the parsed target in `cc.parsedTarget`. // // The resolver to use is determined based on the scheme in the parsed target // and the same is stored in `cc.resolverBuilder`. // // Doesn't grab cc.mu as this method is expected to be called only at Dial time. -func (cc *ClientConn) parseTargetAndFindResolver() error { - channelz.Infof(logger, cc.channelz, "original dial target is: %q", cc.target) +func (cc *ClientConn) initParsedTargetAndResolverBuilder() error { + logger.Infof("original dial target is: %q", cc.target) var rb resolver.Builder parsedTarget, err := parseTarget(cc.target) - if err != nil { - channelz.Infof(logger, cc.channelz, "dial target %q parse failed: %v", cc.target, err) - } else { - channelz.Infof(logger, cc.channelz, "parsed dial target is: %#v", parsedTarget) + if err == nil { rb = cc.getResolver(parsedTarget.URL.Scheme) if rb != nil { cc.parsedTarget = parsedTarget @@ -1691,15 +1707,12 @@ func (cc *ClientConn) parseTargetAndFindResolver() error { defScheme = resolver.GetDefaultScheme() } - channelz.Infof(logger, cc.channelz, "fallback to scheme %q", defScheme) canonicalTarget := defScheme + ":///" + cc.target parsedTarget, err = parseTarget(canonicalTarget) if err != nil { - channelz.Infof(logger, cc.channelz, "dial target %q parse failed: %v", canonicalTarget, err) return err } - channelz.Infof(logger, cc.channelz, "parsed dial target is: %+v", parsedTarget) rb = cc.getResolver(parsedTarget.URL.Scheme) if rb == nil { return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) @@ -1739,7 +1752,7 @@ func encodeAuthority(authority string) string { return false case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters return false - case ':', '[', ']', '@': // Authority related delimeters + case ':', '[', ']', '@': // Authority related delimiters return false } // Everything else must be escaped. 
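The vendored clientconn.go changes above mark `grpc.Dial` and `grpc.DialContext` as deprecated in favor of `grpc.NewClient`, and note that the blocking-style dial options (`WithBlock`, `WithTimeout`, `WithReturnConnectionError`, `FailOnNonTempDialError`) are ignored by `NewClient`. A minimal migration sketch against this vendored gRPC-Go version (the `localhost:50051` target and insecure credentials are illustrative assumptions, not taken from this patch):

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Previously: conn, err := grpc.Dial("localhost:50051",
	//     grpc.WithTransportCredentials(insecure.NewCredentials()))
	// With the vendored version, Dial is deprecated; NewClient creates the
	// channel lazily and does not attempt to connect until the first RPC
	// (or an explicit conn.Connect()).
	conn, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("grpc.NewClient: %v", err)
	}
	defer conn.Close()
}
```

Because `NewClient` never blocks, callers that relied on `WithBlock` for readiness checks would need to wait on connectivity state themselves; the doc-comment change in the hunk above is explicit that those options are ignored.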
@@ -1789,7 +1802,7 @@ func encodeAuthority(authority string) string { // credentials do not match the authority configured through the dial option. // // Doesn't grab cc.mu as this method is expected to be called only at Dial time. -func (cc *ClientConn) determineAuthority() error { +func (cc *ClientConn) initAuthority() error { dopts := cc.dopts // Historically, we had two options for users to specify the serverName or // authority for a channel. One was through the transport credentials @@ -1822,6 +1835,5 @@ func (cc *ClientConn) determineAuthority() error { } else { cc.authority = encodeAuthority(endpoint) } - channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority) return nil } diff --git a/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/README.md b/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/README.md index 4758125de0d..711d20edb2e 100644 --- a/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/README.md +++ b/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/README.md @@ -13,9 +13,16 @@ change from the grpc code generator previously included with `protoc-gen-go`. To restore this behavior, set the option `require_unimplemented_servers=false`. E.g.: -``` - protoc --go-grpc_out=require_unimplemented_servers=false[,other options...]:. \ +```sh + protoc --go-grpc_out=. --go-grpc_opt=require_unimplemented_servers=false[,other options...] \ ``` Note that this is not recommended, and the option is only provided to restore backward compatibility with previously-generated code. + +When embedding the `UnimplementedServer` in a struct that +implements the service, it should be embedded by _value_ instead of as a +_pointer_. If it is embedded as a pointer, it must be assigned to a valid, +non-nil pointer or else unimplemented methods would panic when called. This is +tested at service registration time, and will lead to a panic in +`RegisterServer` if it is not embedded properly. diff --git a/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/grpc.go b/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/grpc.go index 9e15d2d8daf..abc21602292 100644 --- a/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/grpc.go +++ b/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/grpc.go @@ -52,6 +52,10 @@ func (serviceGenerateHelper) formatFullMethodSymbol(service *protogen.Service, m } func (serviceGenerateHelper) genFullMethods(g *protogen.GeneratedFile, service *protogen.Service) { + if len(service.Methods) == 0 { + return + } + g.P("const (") for _, method := range service.Methods { fmSymbol := helper.formatFullMethodSymbol(service, method) @@ -80,9 +84,12 @@ func (serviceGenerateHelper) generateUnimplementedServerType(gen *protogen.Plugi mustOrShould = "should" } // Server Unimplemented struct for forward compatibility. 
- g.P("// Unimplemented", serverType, " ", mustOrShould, " be embedded to have forward compatible implementations.") - g.P("type Unimplemented", serverType, " struct {") - g.P("}") + g.P("// Unimplemented", serverType, " ", mustOrShould, " be embedded to have") + g.P("// forward compatible implementations.") + g.P("//") + g.P("// NOTE: this should be embedded by value instead of pointer to avoid a nil") + g.P("// pointer dereference when methods are called.") + g.P("type Unimplemented", serverType, " struct {}") g.P() for _, method := range service.Methods { nilArg := "" @@ -96,6 +103,7 @@ func (serviceGenerateHelper) generateUnimplementedServerType(gen *protogen.Plugi if *requireUnimplemented { g.P("func (Unimplemented", serverType, ") mustEmbedUnimplemented", serverType, "() {}") } + g.P("func (Unimplemented", serverType, ") testEmbeddedByValue() {}") g.P() } @@ -170,14 +178,30 @@ func generateFileContent(gen *protogen.Plugin, file *protogen.File, g *protogen. g.P("// This is a compile-time assertion to ensure that this generated file") g.P("// is compatible with the grpc package it is being compiled against.") - g.P("// Requires gRPC-Go v1.32.0 or later.") - g.P("const _ = ", grpcPackage.Ident("SupportPackageIsVersion7")) // When changing, update version number above. + if *useGenericStreams { + g.P("// Requires gRPC-Go v1.64.0 or later.") + g.P("const _ = ", grpcPackage.Ident("SupportPackageIsVersion9")) + } else { + g.P("// Requires gRPC-Go v1.62.0 or later.") + g.P("const _ = ", grpcPackage.Ident("SupportPackageIsVersion8")) // When changing, update version number above. + } g.P() for _, service := range file.Services { genService(gen, file, g, service) } } +// genServiceComments copies the comments from the RPC proto definitions +// to the corresponding generated interface file. +func genServiceComments(g *protogen.GeneratedFile, service *protogen.Service) { + if service.Comments.Leading != "" { + // Add empty comment line to attach this service's comments to + // the godoc comments previously output for all services. + g.P("//") + g.P(strings.TrimSpace(service.Comments.Leading.String())) + } +} + func genService(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, service *protogen.Service) { // Full methods constants. helper.genFullMethods(g, service) @@ -189,14 +213,17 @@ func genService(gen *protogen.Plugin, file *protogen.File, g *protogen.Generated g.P("//") g.P("// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.") + // Copy comments from proto file. 
+ genServiceComments(g, service) + if service.Desc.Options().(*descriptorpb.ServiceOptions).GetDeprecated() { g.P("//") g.P(deprecationComment) } - g.Annotate(clientName, service.Location) + g.AnnotateSymbol(clientName, protogen.Annotation{Location: service.Location}) g.P("type ", clientName, " interface {") for _, method := range service.Methods { - g.Annotate(clientName+"."+method.GoName, method.Location) + g.AnnotateSymbol(clientName+"."+method.GoName, protogen.Annotation{Location: method.Location}) if method.Desc.Options().(*descriptorpb.MethodOptions).GetDeprecated() { g.P(deprecationComment) } @@ -241,15 +268,19 @@ func genService(gen *protogen.Plugin, file *protogen.File, g *protogen.Generated serverType := service.GoName + "Server" g.P("// ", serverType, " is the server API for ", service.GoName, " service.") g.P("// All implementations ", mustOrShould, " embed Unimplemented", serverType) - g.P("// for forward compatibility") + g.P("// for forward compatibility.") + + // Copy comments from proto file. + genServiceComments(g, service) + if service.Desc.Options().(*descriptorpb.ServiceOptions).GetDeprecated() { g.P("//") g.P(deprecationComment) } - g.Annotate(serverType, service.Location) + g.AnnotateSymbol(serverType, protogen.Annotation{Location: service.Location}) g.P("type ", serverType, " interface {") for _, method := range service.Methods { - g.Annotate(serverType+"."+method.GoName, method.Location) + g.AnnotateSymbol(serverType+"."+method.GoName, protogen.Annotation{Location: method.Location}) if method.Desc.Options().(*descriptorpb.MethodOptions).GetDeprecated() { g.P(deprecationComment) } @@ -279,6 +310,13 @@ func genService(gen *protogen.Plugin, file *protogen.File, g *protogen.Generated } serviceDescVar := service.GoName + "_ServiceDesc" g.P("func Register", service.GoName, "Server(s ", grpcPackage.Ident("ServiceRegistrar"), ", srv ", serverType, ") {") + g.P("// If the following call pancis, it indicates Unimplemented", serverType, " was") + g.P("// embedded by pointer and is nil. This will cause panics if an") + g.P("// unimplemented method is ever invoked, so we test this at initialization") + g.P("// time to prevent it from happening at runtime later due to I/O.") + g.P("if t, ok := srv.(interface { testEmbeddedByValue() }); ok {") + g.P("t.testEmbeddedByValue()") + g.P("}") g.P("s.RegisterService(&", serviceDescVar, `, srv)`) g.P("}") g.P() @@ -295,12 +333,27 @@ func clientSignature(g *protogen.GeneratedFile, method *protogen.Method) string if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() { s += "*" + g.QualifiedGoIdent(method.Output.GoIdent) } else { - s += method.Parent.GoName + "_" + method.GoName + "Client" + if *useGenericStreams { + s += clientStreamInterface(g, method) + } else { + s += method.Parent.GoName + "_" + method.GoName + "Client" + } } s += ", error)" return s } +func clientStreamInterface(g *protogen.GeneratedFile, method *protogen.Method) string { + typeParam := g.QualifiedGoIdent(method.Input.GoIdent) + ", " + g.QualifiedGoIdent(method.Output.GoIdent) + if method.Desc.IsStreamingClient() && method.Desc.IsStreamingServer() { + return g.QualifiedGoIdent(grpcPackage.Ident("BidiStreamingClient")) + "[" + typeParam + "]" + } else if method.Desc.IsStreamingClient() { + return g.QualifiedGoIdent(grpcPackage.Ident("ClientStreamingClient")) + "[" + typeParam + "]" + } else { // i.e. 
if method.Desc.IsStreamingServer() + return g.QualifiedGoIdent(grpcPackage.Ident("ServerStreamingClient")) + "[" + g.QualifiedGoIdent(method.Output.GoIdent) + "]" + } +} + func genClientMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, method *protogen.Method, index int) { service := method.Parent fmSymbol := helper.formatFullMethodSymbol(service, method) @@ -309,20 +362,27 @@ func genClientMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.Gene g.P(deprecationComment) } g.P("func (c *", unexport(service.GoName), "Client) ", clientSignature(g, method), "{") + g.P("cOpts := append([]", grpcPackage.Ident("CallOption"), "{", grpcPackage.Ident("StaticMethod()"), "}, opts...)") if !method.Desc.IsStreamingServer() && !method.Desc.IsStreamingClient() { g.P("out := new(", method.Output.GoIdent, ")") - g.P(`err := c.cc.Invoke(ctx, `, fmSymbol, `, in, out, opts...)`) + g.P(`err := c.cc.Invoke(ctx, `, fmSymbol, `, in, out, cOpts...)`) g.P("if err != nil { return nil, err }") g.P("return out, nil") g.P("}") g.P() return } - streamType := unexport(service.GoName) + method.GoName + "Client" + + streamImpl := unexport(service.GoName) + method.GoName + "Client" + if *useGenericStreams { + typeParam := g.QualifiedGoIdent(method.Input.GoIdent) + ", " + g.QualifiedGoIdent(method.Output.GoIdent) + streamImpl = g.QualifiedGoIdent(grpcPackage.Ident("GenericClientStream")) + "[" + typeParam + "]" + } + serviceDescVar := service.GoName + "_ServiceDesc" - g.P("stream, err := c.cc.NewStream(ctx, &", serviceDescVar, ".Streams[", index, `], `, fmSymbol, `, opts...)`) + g.P("stream, err := c.cc.NewStream(ctx, &", serviceDescVar, ".Streams[", index, `], `, fmSymbol, `, cOpts...)`) g.P("if err != nil { return nil, err }") - g.P("x := &", streamType, "{stream}") + g.P("x := &", streamImpl, "{ClientStream: stream}") if !method.Desc.IsStreamingClient() { g.P("if err := x.ClientStream.SendMsg(in); err != nil { return nil, err }") g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }") @@ -331,11 +391,20 @@ func genClientMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.Gene g.P("}") g.P() + // Auxiliary types aliases, for backwards compatibility. + if *useGenericStreams { + g.P("// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.") + g.P("type ", service.GoName, "_", method.GoName, "Client = ", clientStreamInterface(g, method)) + g.P() + return + } + + // Stream auxiliary types and methods, if we're not taking advantage of the + // pre-implemented generic types and their methods. genSend := method.Desc.IsStreamingClient() genRecv := method.Desc.IsStreamingServer() genCloseAndRecv := !method.Desc.IsStreamingServer() - // Stream auxiliary types and methods. 
g.P("type ", service.GoName, "_", method.GoName, "Client interface {") if genSend { g.P("Send(*", method.Input.GoIdent, ") error") @@ -350,19 +419,19 @@ func genClientMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.Gene g.P("}") g.P() - g.P("type ", streamType, " struct {") + g.P("type ", streamImpl, " struct {") g.P(grpcPackage.Ident("ClientStream")) g.P("}") g.P() if genSend { - g.P("func (x *", streamType, ") Send(m *", method.Input.GoIdent, ") error {") + g.P("func (x *", streamImpl, ") Send(m *", method.Input.GoIdent, ") error {") g.P("return x.ClientStream.SendMsg(m)") g.P("}") g.P() } if genRecv { - g.P("func (x *", streamType, ") Recv() (*", method.Output.GoIdent, ", error) {") + g.P("func (x *", streamImpl, ") Recv() (*", method.Output.GoIdent, ", error) {") g.P("m := new(", method.Output.GoIdent, ")") g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }") g.P("return m, nil") @@ -370,7 +439,7 @@ func genClientMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.Gene g.P() } if genCloseAndRecv { - g.P("func (x *", streamType, ") CloseAndRecv() (*", method.Output.GoIdent, ", error) {") + g.P("func (x *", streamImpl, ") CloseAndRecv() (*", method.Output.GoIdent, ", error) {") g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }") g.P("m := new(", method.Output.GoIdent, ")") g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }") @@ -391,7 +460,11 @@ func serverSignature(g *protogen.GeneratedFile, method *protogen.Method) string reqArgs = append(reqArgs, "*"+g.QualifiedGoIdent(method.Input.GoIdent)) } if method.Desc.IsStreamingClient() || method.Desc.IsStreamingServer() { - reqArgs = append(reqArgs, method.Parent.GoName+"_"+method.GoName+"Server") + if *useGenericStreams { + reqArgs = append(reqArgs, serverStreamInterface(g, method)) + } else { + reqArgs = append(reqArgs, method.Parent.GoName+"_"+method.GoName+"Server") + } } return method.GoName + "(" + strings.Join(reqArgs, ", ") + ") " + ret } @@ -437,6 +510,17 @@ func genServiceDesc(file *protogen.File, g *protogen.GeneratedFile, serviceDescV g.P() } +func serverStreamInterface(g *protogen.GeneratedFile, method *protogen.Method) string { + typeParam := g.QualifiedGoIdent(method.Input.GoIdent) + ", " + g.QualifiedGoIdent(method.Output.GoIdent) + if method.Desc.IsStreamingClient() && method.Desc.IsStreamingServer() { + return g.QualifiedGoIdent(grpcPackage.Ident("BidiStreamingServer")) + "[" + typeParam + "]" + } else if method.Desc.IsStreamingClient() { + return g.QualifiedGoIdent(grpcPackage.Ident("ClientStreamingServer")) + "[" + typeParam + "]" + } else { // i.e. 
if method.Desc.IsStreamingServer() + return g.QualifiedGoIdent(grpcPackage.Ident("ServerStreamingServer")) + "[" + g.QualifiedGoIdent(method.Output.GoIdent) + "]" + } +} + func genServerMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, method *protogen.Method, hnameFuncNameFormatter func(string) string) string { service := method.Parent hname := fmt.Sprintf("_%s_%s_Handler", service.GoName, method.GoName) @@ -459,23 +543,38 @@ func genServerMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.Gene g.P() return hname } - streamType := unexport(service.GoName) + method.GoName + "Server" + + streamImpl := unexport(service.GoName) + method.GoName + "Server" + if *useGenericStreams { + typeParam := g.QualifiedGoIdent(method.Input.GoIdent) + ", " + g.QualifiedGoIdent(method.Output.GoIdent) + streamImpl = g.QualifiedGoIdent(grpcPackage.Ident("GenericServerStream")) + "[" + typeParam + "]" + } + g.P("func ", hnameFuncNameFormatter(hname), "(srv interface{}, stream ", grpcPackage.Ident("ServerStream"), ") error {") if !method.Desc.IsStreamingClient() { g.P("m := new(", method.Input.GoIdent, ")") g.P("if err := stream.RecvMsg(m); err != nil { return err }") - g.P("return srv.(", service.GoName, "Server).", method.GoName, "(m, &", streamType, "{stream})") + g.P("return srv.(", service.GoName, "Server).", method.GoName, "(m, &", streamImpl, "{ServerStream: stream})") } else { - g.P("return srv.(", service.GoName, "Server).", method.GoName, "(&", streamType, "{stream})") + g.P("return srv.(", service.GoName, "Server).", method.GoName, "(&", streamImpl, "{ServerStream: stream})") } g.P("}") g.P() + // Auxiliary types aliases, for backwards compatibility. + if *useGenericStreams { + g.P("// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.") + g.P("type ", service.GoName, "_", method.GoName, "Server = ", serverStreamInterface(g, method)) + g.P() + return hname + } + + // Stream auxiliary types and methods, if we're not taking advantage of the + // pre-implemented generic types and their methods. genSend := method.Desc.IsStreamingServer() genSendAndClose := !method.Desc.IsStreamingServer() genRecv := method.Desc.IsStreamingClient() - // Stream auxiliary types and methods. 
g.P("type ", service.GoName, "_", method.GoName, "Server interface {") if genSend { g.P("Send(*", method.Output.GoIdent, ") error") @@ -490,25 +589,25 @@ func genServerMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.Gene g.P("}") g.P() - g.P("type ", streamType, " struct {") + g.P("type ", streamImpl, " struct {") g.P(grpcPackage.Ident("ServerStream")) g.P("}") g.P() if genSend { - g.P("func (x *", streamType, ") Send(m *", method.Output.GoIdent, ") error {") + g.P("func (x *", streamImpl, ") Send(m *", method.Output.GoIdent, ") error {") g.P("return x.ServerStream.SendMsg(m)") g.P("}") g.P() } if genSendAndClose { - g.P("func (x *", streamType, ") SendAndClose(m *", method.Output.GoIdent, ") error {") + g.P("func (x *", streamImpl, ") SendAndClose(m *", method.Output.GoIdent, ") error {") g.P("return x.ServerStream.SendMsg(m)") g.P("}") g.P() } if genRecv { - g.P("func (x *", streamType, ") Recv() (*", method.Input.GoIdent, ", error) {") + g.P("func (x *", streamImpl, ") Recv() (*", method.Input.GoIdent, ", error) {") g.P("m := new(", method.Input.GoIdent, ")") g.P("if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err }") g.P("return m, nil") diff --git a/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/main.go b/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/main.go index 340eaf3ee7b..183ba69742e 100644 --- a/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/main.go +++ b/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/main.go @@ -38,12 +38,14 @@ import ( "fmt" "google.golang.org/protobuf/compiler/protogen" + "google.golang.org/protobuf/types/descriptorpb" "google.golang.org/protobuf/types/pluginpb" ) -const version = "1.3.0" +const version = "1.5.1" var requireUnimplemented *bool +var useGenericStreams *bool func main() { showVersion := flag.Bool("version", false, "print the version and exit") @@ -55,11 +57,14 @@ func main() { var flags flag.FlagSet requireUnimplemented = flags.Bool("require_unimplemented_servers", true, "set to false to match legacy behavior") + useGenericStreams = flags.Bool("use_generic_streams_experimental", true, "set to true to use generic types for streaming client and server objects; this flag is EXPERIMENTAL and may be changed or removed in a future release") protogen.Options{ ParamFunc: flags.Set, }.Run(func(gen *protogen.Plugin) error { - gen.SupportedFeatures = uint64(pluginpb.CodeGeneratorResponse_FEATURE_PROTO3_OPTIONAL) + gen.SupportedFeatures = uint64(pluginpb.CodeGeneratorResponse_FEATURE_PROTO3_OPTIONAL) | uint64(pluginpb.CodeGeneratorResponse_FEATURE_SUPPORTS_EDITIONS) + gen.SupportedEditionsMinimum = descriptorpb.Edition_EDITION_PROTO2 + gen.SupportedEditionsMaximum = descriptorpb.Edition_EDITION_2023 for _, f := range gen.Files { if !f.Generate { continue diff --git a/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/protoc-gen-go-grpc_test.sh b/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/protoc-gen-go-grpc_test.sh new file mode 100644 index 00000000000..32e8e26ecdc --- /dev/null +++ b/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/protoc-gen-go-grpc_test.sh @@ -0,0 +1,50 @@ +#!/bin/bash -e + +# Copyright 2024 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Uncomment to enable debugging. +# set -x + +WORKDIR="$(dirname $0)" +TEMPDIR=$(mktemp -d) + +trap "rm -rf ${TEMPDIR}" EXIT + +# Build protoc-gen-go-grpc binary and add to $PATH. +pushd "${WORKDIR}" +go build -o "${TEMPDIR}" . +PATH="${TEMPDIR}:${PATH}" +popd + +protoc \ + --go-grpc_out="${TEMPDIR}" \ + --go-grpc_opt=paths=source_relative,use_generic_streams_experimental=true \ + "examples/route_guide/routeguide/route_guide.proto" + +GOLDENFILE="examples/route_guide/routeguide/route_guide_grpc.pb.go" +GENFILE="${TEMPDIR}/examples/route_guide/routeguide/route_guide_grpc.pb.go" + +# diff is piped to [[ $? == 1 ]] to avoid exiting on diff but exit on error +# (like if the file was not found). See man diff for more info. +DIFF=$(diff "${GOLDENFILE}" "${GENFILE}" || [[ $? == 1 ]]) +if [[ -n "${DIFF}" ]]; then + echo -e "ERROR: Generated file differs from golden file:\n${DIFF}" + echo -e "If you have made recent changes to protoc-gen-go-grpc," \ + "please regenerate the golden files by running:" \ + "\n\t go generate google.golang.org/grpc/..." >&2 + exit 1 +fi + +echo SUCCESS diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh deleted file mode 100644 index 4cdc6ba7c09..00000000000 --- a/vendor/google.golang.org/grpc/codegen.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash - -# This script serves as an example to demonstrate how to generate the gRPC-Go -# interface and the related messages from .proto file. -# -# It assumes the installation of i) Google proto buffer compiler at -# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen -# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have -# not, please install them first. -# -# We recommend running this script at $GOPATH/src. -# -# If this is not what you need, feel free to make your own scripts. Again, this -# script is for demonstration purpose. -# -proto=$1 -protoc --go_out=plugins=grpc:. $proto diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go index 08476ad1fe1..0b42c302b24 100644 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -235,7 +235,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { if ci >= _maxCode { - return fmt.Errorf("invalid code: %q", ci) + return fmt.Errorf("invalid code: %d", ci) } *c = Code(ci) diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go index ca4d0331543..38cb5cf0d74 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 +// protoc-gen-go v1.34.1 // protoc v4.25.2 // source: grpc/gcp/altscontext.proto diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go index 93ceaeb2f9c..55fc7f65f10 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 +// protoc-gen-go v1.34.1 // protoc v4.25.2 // source: grpc/gcp/handshaker.proto @@ -458,11 +458,11 @@ type ServerHandshakeParameters struct { // (Optional) A list of local identities supported by the server, if // specified. Otherwise, the handshaker chooses a default local identity. LocalIdentities []*Identity `protobuf:"bytes,2,rep,name=local_identities,json=localIdentities,proto3" json:"local_identities,omitempty"` - // (Optional) An access token created by the caller only intended for use in - // ALTS connections. The access token that should be used to authenticate to - // the peer. The access token MUST be strongly bound to the ALTS credentials + // A token created by the caller only intended for use in + // ALTS connections. The token should be used to authenticate to + // the peer. The token MUST be strongly bound to the ALTS credentials // used to establish the connection that the token is sent over. - AccessToken *string `protobuf:"bytes,3,opt,name=access_token,json=accessToken,proto3,oneof" json:"access_token,omitempty"` + Token *string `protobuf:"bytes,3,opt,name=token,proto3,oneof" json:"token,omitempty"` } func (x *ServerHandshakeParameters) Reset() { @@ -511,9 +511,9 @@ func (x *ServerHandshakeParameters) GetLocalIdentities() []*Identity { return nil } -func (x *ServerHandshakeParameters) GetAccessToken() string { - if x != nil && x.AccessToken != nil { - return *x.AccessToken +func (x *ServerHandshakeParameters) GetToken() string { + if x != nil && x.Token != nil { + return *x.Token } return "" } @@ -1110,7 +1110,7 @@ var file_grpc_gcp_handshaker_proto_rawDesc = []byte{ 0x7a, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xbe, 0x01, 0x0a, 0x19, 0x53, 0x65, + 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xaa, 0x01, 0x0a, 0x19, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, @@ -1119,130 +1119,128 @@ var file_grpc_gcp_handshaker_proto_rawDesc = []byte{ 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, - 0x73, 0x12, 0x26, 0x0a, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 
0x52, 0x0b, 0x61, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x61, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xa5, 0x04, 0x0a, 0x17, 0x53, - 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, - 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x12, 0x33, 0x0a, 0x15, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x6d, 0x0a, 0x14, 0x68, - 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, - 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x2e, 0x48, 0x61, 0x6e, - 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, - 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, - 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, - 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x65, - 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, - 0x74, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, - 0x12, 0x3b, 0x0a, 0x0f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, - 0x69, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0e, 0x72, - 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x40, 0x0a, - 0x0c, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, - 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x73, 0x52, 0x0b, 0x72, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, - 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, - 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61, 0x6d, - 0x65, 0x53, 0x69, 0x7a, 0x65, 0x1a, 0x6b, 0x0a, 0x18, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, + 0x73, 0x12, 0x19, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, + 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xa5, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, + 0x65, 0x71, 0x12, 0x33, 0x0a, 0x15, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 
0x20, 0x03, 0x28, + 0x09, 0x52, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x6d, 0x0a, 0x14, 0x68, 0x61, 0x6e, 0x64, 0x73, + 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, + 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, + 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, - 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x22, 0x62, 0x0a, 0x17, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, - 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x12, 0x19, 0x0a, - 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x74, 0x77, - 0x6f, 0x72, 0x6b, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6d, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x10, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4c, 0x61, 0x74, - 0x65, 0x6e, 0x63, 0x79, 0x4d, 0x73, 0x22, 0xe5, 0x01, 0x0a, 0x0d, 0x48, 0x61, 0x6e, 0x64, 0x73, - 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x12, 0x46, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, - 0x71, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, - 0x12, 0x46, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, - 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, - 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x37, 0x0a, 0x04, 0x6e, 0x65, 0x78, 0x74, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, - 0x70, 0x2e, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x04, 0x6e, 0x65, 0x78, - 0x74, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x9a, - 0x03, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 
0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, - 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0e, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, - 0x19, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x44, 0x61, 0x74, 0x61, 0x12, 0x37, 0x0a, 0x0d, 0x70, 0x65, - 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, - 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x2a, - 0x0a, 0x11, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x6f, - 0x70, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x6b, 0x65, 0x65, 0x70, 0x43, - 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x6e, 0x12, 0x49, 0x0a, 0x11, 0x70, 0x65, - 0x65, 0x72, 0x5f, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, - 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0f, 0x70, 0x65, 0x65, 0x72, 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, - 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, - 0x61, 0x78, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x40, 0x0a, 0x10, 0x48, - 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, - 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0xbe, 0x01, - 0x0a, 0x0e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, - 0x12, 0x1d, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6f, 0x75, 0x74, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, - 0x25, 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x62, 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, - 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x12, 0x32, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, - 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x06, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x18, 
0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, + 0x79, 0x52, 0x13, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, + 0x73, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0d, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0f, + 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, + 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x0c, 0x72, 0x70, 0x63, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, + 0x72, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, + 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, + 0x65, 0x1a, 0x6b, 0x0a, 0x18, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, + 0x65, 0x72, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x62, + 0x0a, 0x17, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, + 0x79, 0x74, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, + 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x10, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, + 0x4d, 0x73, 0x22, 0xe5, 0x01, 0x0a, 0x0d, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, + 0x72, 0x52, 0x65, 0x71, 0x12, 0x46, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, + 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 
0x61, 0x72, 0x74, 0x12, 0x46, 0x0a, 0x0c, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, + 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, + 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, + 0x74, 0x61, 0x72, 0x74, 0x12, 0x37, 0x0a, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x4e, 0x65, + 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x42, 0x0b, 0x0a, + 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x9a, 0x03, 0x0a, 0x10, 0x48, + 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, + 0x31, 0x0a, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, + 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x63, + 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6b, + 0x65, 0x79, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b, + 0x65, 0x79, 0x44, 0x61, 0x74, 0x61, 0x12, 0x37, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, + 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x6b, 0x65, + 0x65, 0x70, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x6b, 0x65, 0x65, 0x70, 0x43, 0x68, 0x61, 0x6e, 0x6e, + 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x6e, 0x12, 0x49, 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x72, + 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x0f, 0x70, 0x65, 0x65, 0x72, 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, + 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x40, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, + 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 
0x0a, 0x04, 0x63, + 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x0e, 0x48, 0x61, + 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1d, 0x0a, 0x0a, + 0x6f, 0x75, 0x74, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x09, 0x6f, 0x75, 0x74, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x62, 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, + 0x65, 0x64, 0x12, 0x32, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, + 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, + 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2a, 0x4a, 0x0a, 0x11, 0x48, 0x61, + 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, + 0x22, 0x0a, 0x1e, 0x48, 0x41, 0x4e, 0x44, 0x53, 0x48, 0x41, 0x4b, 0x45, 0x5f, 0x50, 0x52, 0x4f, + 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x4c, 0x53, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, + 0x41, 0x4c, 0x54, 0x53, 0x10, 0x02, 0x2a, 0x45, 0x0a, 0x0f, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x4e, 0x45, 0x54, + 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, + 0x43, 0x50, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x02, 0x32, 0x5b, 0x0a, + 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x12, 0x46, 0x0a, 0x0b, 0x44, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, + 0x65, 0x12, 0x17, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, + 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2a, 0x4a, - 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x12, 0x22, 0x0a, 0x1e, 0x48, 0x41, 0x4e, 0x44, 0x53, 0x48, 0x41, 0x4b, 0x45, - 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, - 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x4c, 0x53, 0x10, 0x01, - 0x12, 0x08, 0x0a, 0x04, 0x41, 0x4c, 0x54, 0x53, 0x10, 0x02, 0x2a, 0x45, 0x0a, 0x0f, 0x4e, 0x65, - 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x20, 0x0a, - 0x1c, 
0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, - 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, - 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, - 0x02, 0x32, 0x5b, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x46, 0x0a, 0x0b, 0x44, 0x6f, 0x48, 0x61, 0x6e, 0x64, - 0x73, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x17, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, - 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x1a, 0x18, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, - 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x6b, - 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x61, 0x6c, 0x74, 0x73, 0x2e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x42, 0x0f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, - 0x6b, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, - 0x70, 0x63, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x61, - 0x6c, 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x67, 0x63, 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x6b, 0x0a, 0x15, 0x69, 0x6f, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x61, 0x6c, 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x42, 0x0f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, + 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, + 0x72, 0x70, 0x63, 0x5f, 0x67, 0x63, 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go index ba1c46f64b1..358074b6494 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 +// - protoc-gen-go-grpc v1.4.0 // - protoc v4.25.2 // source: grpc/gcp/handshaker.proto @@ -32,8 +32,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 const ( HandshakerService_DoHandshake_FullMethodName = "/grpc.gcp.HandshakerService/DoHandshake" @@ -49,7 +49,7 @@ type HandshakerServiceClient interface { // messages with next. 
Each time client sends a request, the handshaker // service expects to respond. Client does not have to wait for service's // response before sending next request. - DoHandshake(ctx context.Context, opts ...grpc.CallOption) (HandshakerService_DoHandshakeClient, error) + DoHandshake(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[HandshakerReq, HandshakerResp], error) } type handshakerServiceClient struct { @@ -60,36 +60,18 @@ func NewHandshakerServiceClient(cc grpc.ClientConnInterface) HandshakerServiceCl return &handshakerServiceClient{cc} } -func (c *handshakerServiceClient) DoHandshake(ctx context.Context, opts ...grpc.CallOption) (HandshakerService_DoHandshakeClient, error) { - stream, err := c.cc.NewStream(ctx, &HandshakerService_ServiceDesc.Streams[0], HandshakerService_DoHandshake_FullMethodName, opts...) +func (c *handshakerServiceClient) DoHandshake(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[HandshakerReq, HandshakerResp], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &HandshakerService_ServiceDesc.Streams[0], HandshakerService_DoHandshake_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &handshakerServiceDoHandshakeClient{stream} + x := &grpc.GenericClientStream[HandshakerReq, HandshakerResp]{ClientStream: stream} return x, nil } -type HandshakerService_DoHandshakeClient interface { - Send(*HandshakerReq) error - Recv() (*HandshakerResp, error) - grpc.ClientStream -} - -type handshakerServiceDoHandshakeClient struct { - grpc.ClientStream -} - -func (x *handshakerServiceDoHandshakeClient) Send(m *HandshakerReq) error { - return x.ClientStream.SendMsg(m) -} - -func (x *handshakerServiceDoHandshakeClient) Recv() (*HandshakerResp, error) { - m := new(HandshakerResp) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type HandshakerService_DoHandshakeClient = grpc.BidiStreamingClient[HandshakerReq, HandshakerResp] // HandshakerServiceServer is the server API for HandshakerService service. // All implementations must embed UnimplementedHandshakerServiceServer @@ -101,7 +83,7 @@ type HandshakerServiceServer interface { // messages with next. Each time client sends a request, the handshaker // service expects to respond. Client does not have to wait for service's // response before sending next request. 
- DoHandshake(HandshakerService_DoHandshakeServer) error + DoHandshake(grpc.BidiStreamingServer[HandshakerReq, HandshakerResp]) error mustEmbedUnimplementedHandshakerServiceServer() } @@ -109,7 +91,7 @@ type HandshakerServiceServer interface { type UnimplementedHandshakerServiceServer struct { } -func (UnimplementedHandshakerServiceServer) DoHandshake(HandshakerService_DoHandshakeServer) error { +func (UnimplementedHandshakerServiceServer) DoHandshake(grpc.BidiStreamingServer[HandshakerReq, HandshakerResp]) error { return status.Errorf(codes.Unimplemented, "method DoHandshake not implemented") } func (UnimplementedHandshakerServiceServer) mustEmbedUnimplementedHandshakerServiceServer() {} @@ -126,30 +108,11 @@ func RegisterHandshakerServiceServer(s grpc.ServiceRegistrar, srv HandshakerServ } func _HandshakerService_DoHandshake_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(HandshakerServiceServer).DoHandshake(&handshakerServiceDoHandshakeServer{stream}) + return srv.(HandshakerServiceServer).DoHandshake(&grpc.GenericServerStream[HandshakerReq, HandshakerResp]{ServerStream: stream}) } -type HandshakerService_DoHandshakeServer interface { - Send(*HandshakerResp) error - Recv() (*HandshakerReq, error) - grpc.ServerStream -} - -type handshakerServiceDoHandshakeServer struct { - grpc.ServerStream -} - -func (x *handshakerServiceDoHandshakeServer) Send(m *HandshakerResp) error { - return x.ServerStream.SendMsg(m) -} - -func (x *handshakerServiceDoHandshakeServer) Recv() (*HandshakerReq, error) { - m := new(HandshakerReq) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type HandshakerService_DoHandshakeServer = grpc.BidiStreamingServer[HandshakerReq, HandshakerResp] // HandshakerService_ServiceDesc is the grpc.ServiceDesc for HandshakerService service. // It's only intended for direct use with grpc.RegisterService, diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go index 3e53b2b13bc..18cc9cfbd59 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 +// protoc-gen-go v1.34.1 // protoc v4.25.2 // source: grpc/gcp/transport_security_common.proto diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index f6b55c68b56..665e790bb0f 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -30,7 +30,7 @@ import ( "google.golang.org/grpc/attributes" icredentials "google.golang.org/grpc/internal/credentials" - "google.golang.org/protobuf/protoadapt" + "google.golang.org/protobuf/proto" ) // PerRPCCredentials defines the common interface for the credentials which need to @@ -237,7 +237,7 @@ func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo { } // CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one. 
-// It returns success if 1) the condition is satisified or 2) AuthInfo struct does not implement GetCommonAuthInfo() method +// It returns success if 1) the condition is satisfied or 2) AuthInfo struct does not implement GetCommonAuthInfo() method // or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility. // // This API is experimental. @@ -287,5 +287,5 @@ type ChannelzSecurityValue interface { type OtherChannelzSecurityValue struct { ChannelzSecurityValue Name string - Value protoadapt.MessageV1 + Value proto.Message } diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 5dafd34edf9..4114358545e 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -27,9 +27,13 @@ import ( "net/url" "os" + "google.golang.org/grpc/grpclog" credinternal "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/envconfig" ) +var logger = grpclog.Component("credentials") + // TLSInfo contains the auth information for a TLS authenticated connection. // It implements the AuthInfo interface. type TLSInfo struct { @@ -112,6 +116,22 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon conn.Close() return nil, nil, ctx.Err() } + + // The negotiated protocol can be either of the following: + // 1. h2: When the server supports ALPN. Only HTTP/2 can be negotiated since + // it is the only protocol advertised by the client during the handshake. + // The tls library ensures that the server chooses a protocol advertised + // by the client. + // 2. "" (empty string): If the server doesn't support ALPN. ALPN is a requirement + // for using HTTP/2 over TLS. We can terminate the connection immediately. + np := conn.ConnectionState().NegotiatedProtocol + if np == "" { + if envconfig.EnforceALPNEnabled { + conn.Close() + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") + } + logger.Warningf("Allowing TLS connection to server %q with ALPN disabled. TLS connections to servers with ALPN disabled will be disallowed in future grpc-go releases", cfg.ServerName) + } tlsInfo := TLSInfo{ State: conn.ConnectionState(), CommonAuthInfo: CommonAuthInfo{ @@ -131,8 +151,20 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) conn.Close() return nil, nil, err } + cs := conn.ConnectionState() + // The negotiated application protocol can be empty only if the client doesn't + // support ALPN. In such cases, we can close the connection since ALPN is required + // for using HTTP/2 over TLS. + if cs.NegotiatedProtocol == "" { + if envconfig.EnforceALPNEnabled { + conn.Close() + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") + } else if logger.V(2) { + logger.Info("Allowing TLS connection from client with ALPN disabled. 
TLS connections with ALPN disabled will be disallowed in future grpc-go releases") + } + } tlsInfo := TLSInfo{ - State: conn.ConnectionState(), + State: cs, CommonAuthInfo: CommonAuthInfo{ SecurityLevel: PrivacyAndIntegrity, }, diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 402493224e0..f5453d48a53 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -21,6 +21,7 @@ package grpc import ( "context" "net" + "net/url" "time" "google.golang.org/grpc/backoff" @@ -36,6 +37,11 @@ import ( "google.golang.org/grpc/stats" ) +const ( + // https://github.com/grpc/proposal/blob/master/A6-client-retries.md#limits-on-retries-and-hedges + defaultMaxCallAttempts = 5 +) + func init() { internal.AddGlobalDialOptions = func(opt ...DialOption) { globalDialOptions = append(globalDialOptions, opt...) @@ -43,6 +49,14 @@ func init() { internal.ClearGlobalDialOptions = func() { globalDialOptions = nil } + internal.AddGlobalPerTargetDialOptions = func(opt any) { + if ptdo, ok := opt.(perTargetDialOption); ok { + globalPerTargetDialOptions = append(globalPerTargetDialOptions, ptdo) + } + } + internal.ClearGlobalPerTargetDialOptions = func() { + globalPerTargetDialOptions = nil + } internal.WithBinaryLogger = withBinaryLogger internal.JoinDialOptions = newJoinDialOption internal.DisableGlobalDialOptions = newDisableGlobalDialOptions @@ -80,6 +94,7 @@ type dialOptions struct { idleTimeout time.Duration recvBufferPool SharedBufferPool defaultScheme string + maxCallAttempts int } // DialOption configures how we set up the connection. @@ -89,6 +104,19 @@ type DialOption interface { var globalDialOptions []DialOption +// perTargetDialOption takes a parsed target and returns a dial option to apply. +// +// This gets called after NewClient() parses the target, and allows per target +// configuration set through a returned DialOption. The DialOption will not take +// effect if specifies a resolver builder, as that Dial Option is factored in +// while parsing target. +type perTargetDialOption interface { + // DialOption returns a Dial Option to apply. + DialOptionForTarget(parsedTarget url.URL) DialOption +} + +var globalPerTargetDialOptions []perTargetDialOption + // EmptyDialOption does not alter the dial configuration. It can be embedded in // another structure to build custom dial options. // @@ -300,6 +328,9 @@ func withBackoff(bs internalbackoff.Strategy) DialOption { // // Use of this feature is not recommended. For more information, please see: // https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md +// +// Deprecated: this DialOption is not supported by NewClient. +// Will be supported throughout 1.x. func WithBlock() DialOption { return newFuncDialOption(func(o *dialOptions) { o.block = true @@ -314,10 +345,8 @@ func WithBlock() DialOption { // Use of this feature is not recommended. For more information, please see: // https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md // -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// Deprecated: this DialOption is not supported by NewClient. +// Will be supported throughout 1.x. 
func WithReturnConnectionError() DialOption { return newFuncDialOption(func(o *dialOptions) { o.block = true @@ -387,8 +416,8 @@ func WithCredentialsBundle(b credentials.Bundle) DialOption { // WithTimeout returns a DialOption that configures a timeout for dialing a // ClientConn initially. This is valid if and only if WithBlock() is present. // -// Deprecated: use DialContext instead of Dial and context.WithTimeout -// instead. Will be supported throughout 1.x. +// Deprecated: this DialOption is not supported by NewClient. +// Will be supported throughout 1.x. func WithTimeout(d time.Duration) DialOption { return newFuncDialOption(func(o *dialOptions) { o.timeout = d @@ -470,9 +499,8 @@ func withBinaryLogger(bl binarylog.Logger) DialOption { // Use of this feature is not recommended. For more information, please see: // https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md // -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// Deprecated: this DialOption is not supported by NewClient. +// This API may be changed or removed in a // later release. func FailOnNonTempDialError(f bool) DialOption { return newFuncDialOption(func(o *dialOptions) { @@ -601,12 +629,22 @@ func WithDisableRetry() DialOption { }) } +// MaxHeaderListSizeDialOption is a DialOption that specifies the maximum +// (uncompressed) size of header list that the client is prepared to accept. +type MaxHeaderListSizeDialOption struct { + MaxHeaderListSize uint32 +} + +func (o MaxHeaderListSizeDialOption) apply(do *dialOptions) { + do.copts.MaxHeaderListSize = &o.MaxHeaderListSize +} + // WithMaxHeaderListSize returns a DialOption that specifies the maximum // (uncompressed) size of header list that the client is prepared to accept. func WithMaxHeaderListSize(s uint32) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.MaxHeaderListSize = &s - }) + return MaxHeaderListSizeDialOption{ + MaxHeaderListSize: s, + } } // WithDisableHealthCheck disables the LB channel health checking for all @@ -645,10 +683,11 @@ func defaultDialOptions() dialOptions { idleTimeout: 30 * time.Minute, recvBufferPool: nopBufferPool{}, defaultScheme: "dns", + maxCallAttempts: defaultMaxCallAttempts, } } -// withGetMinConnectDeadline specifies the function that clientconn uses to +// withMinConnectDeadline specifies the function that clientconn uses to // get minConnectDeadline. This can be used to make connection attempts happen // faster/slower. // @@ -702,6 +741,23 @@ func WithIdleTimeout(d time.Duration) DialOption { }) } +// WithMaxCallAttempts returns a DialOption that configures the maximum number +// of attempts per call (including retries and hedging) using the channel. +// Service owners may specify a higher value for these parameters, but higher +// values will be treated as equal to the maximum value by the client +// implementation. This mitigates security concerns related to the service +// config being transferred to the client via DNS. +// +// A value of 5 will be used if this dial option is not set or n < 2. +func WithMaxCallAttempts(n int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + if n < 2 { + n = defaultMaxCallAttempts + } + o.maxCallAttempts = n + }) +} + // WithRecvBufferPool returns a DialOption that configures the ClientConn // to use the provided shared buffer pool for parsing incoming messages. Depending // on the application's workload, this could result in reduced memory allocation. 
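
Note on the dialoptions.go hunk above: WithMaxCallAttempts is new in this vendored gRPC-Go version, WithMaxHeaderListSize is now backed by the exported MaxHeaderListSizeDialOption type, and the block/timeout-style options are marked deprecated for NewClient. The sketch below is illustrative only and is not part of this patch; the target address is a placeholder, and it assumes the grpc-go APIs exactly as they appear in the vendored code above.

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Placeholder target, shown only to exercise the new options.
	conn, err := grpc.NewClient(
		"dns:///example.invalid:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		// Caps retries/hedges per call; per the doc comment added above,
		// the default of 5 is used when the option is unset or n < 2.
		grpc.WithMaxCallAttempts(3),
		// Now implemented via the exported MaxHeaderListSizeDialOption type.
		grpc.WithMaxHeaderListSize(1<<20),
	)
	if err != nil {
		log.Fatalf("grpc.NewClient: %v", err)
	}
	defer conn.Close()
}

Per the added doc comment, the n < 2 fallback exists so that DNS-delivered service configs cannot push the attempt count below a sane floor; callers bound the number of attempts rather than disable retries through this option.
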
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 5bf880d4190..38b88350735 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 +// protoc-gen-go v1.34.1 // protoc v4.25.2 // source: grpc/health/v1/health.proto diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index 4c46c098dc6..51b736ba06e 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 +// - protoc-gen-go-grpc v1.4.0 // - protoc v4.25.2 // source: grpc/health/v1/health.proto @@ -32,8 +32,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.62.0 or later. +const _ = grpc.SupportPackageIsVersion8 const ( Health_Check_FullMethodName = "/grpc.health.v1.Health/Check" @@ -43,6 +43,10 @@ const ( // HealthClient is the client API for Health service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// Health is gRPC's mechanism for checking whether a server is able to handle +// RPCs. Its semantics are documented in +// https://github.com/grpc/grpc/blob/master/doc/health-checking.md. type HealthClient interface { // Check gets the health of the specified service. If the requested service // is unknown, the call will fail with status NOT_FOUND. If the caller does @@ -81,8 +85,9 @@ func NewHealthClient(cc grpc.ClientConnInterface) HealthClient { } func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(HealthCheckResponse) - err := c.cc.Invoke(ctx, Health_Check_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, Health_Check_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -90,11 +95,12 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts . } func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) { - stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, opts...) + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &healthWatchClient{stream} + x := &healthWatchClient{ClientStream: stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -124,6 +130,10 @@ func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { // HealthServer is the server API for Health service. 
// All implementations should embed UnimplementedHealthServer // for forward compatibility +// +// Health is gRPC's mechanism for checking whether a server is able to handle +// RPCs. Its semantics are documented in +// https://github.com/grpc/grpc/blob/master/doc/health-checking.md. type HealthServer interface { // Check gets the health of the specified service. If the requested service // is unknown, the call will fail with status NOT_FOUND. If the caller does @@ -198,7 +208,7 @@ func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { if err := stream.RecvMsg(m); err != nil { return err } - return srv.(HealthServer).Watch(m, &healthWatchServer{stream}) + return srv.(HealthServer).Watch(m, &healthWatchServer{ServerStream: stream}) } type Health_WatchServer interface { diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go index fed1c011a32..b15cf482d29 100644 --- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -25,10 +25,10 @@ package backoff import ( "context" "errors" + "math/rand" "time" grpcbackoff "google.golang.org/grpc/backoff" - "google.golang.org/grpc/internal/grpcrand" ) // Strategy defines the methodology for backing off after a grpc connection @@ -67,7 +67,7 @@ func (bc Exponential) Backoff(retries int) time.Duration { } // Randomize backoff delays so that if a cluster of requests start at // the same time, they won't operate in lockstep. - backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1) + backoff *= 1 + bc.Config.Jitter*(rand.Float64()*2-1) if backoff < 0 { return 0 } diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go index 6bf7f87396f..13821a92660 100644 --- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go @@ -75,7 +75,6 @@ func ParseConfig(cfg json.RawMessage) (serviceconfig.LoadBalancingConfig, error) if err != nil { return nil, fmt.Errorf("error parsing config for policy %q: %v", name, err) } - return &lbConfig{childBuilder: builder, childConfig: cfg}, nil } diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go index 45d5e50ea9b..73bb4c4ee9a 100644 --- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -169,7 +169,6 @@ func (gsb *Balancer) latestBalancer() *balancerWrapper { func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error { // The resolver data is only relevant to the most recent LB Policy. balToUpdate := gsb.latestBalancer() - gsbCfg, ok := state.BalancerConfig.(*lbConfig) if ok { // Switch to the child in the config unless it is already active. 
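
Note on the internal/backoff/backoff.go hunk above: only the randomness source changes (the internal grpcrand wrapper is removed and math/rand is called directly, as the TODO in the deleted grpcrand file anticipated once pre-1.21 Go support was dropped); the jitter formula itself is untouched. The standalone sketch below is for illustration only, is not part of the vendored code, and approximates the exponential growth with math.Pow rather than the loop used upstream.

package main

import (
	"fmt"
	"math"
	"math/rand"
	"time"
)

// jitteredBackoff follows the same shape as Exponential.Backoff above:
// the delay grows by `multiplier` per retry, is clamped to maxDelay, and
// is then scaled by a random factor drawn from [1-jitter, 1+jitter].
func jitteredBackoff(retries int, baseDelay, maxDelay time.Duration, multiplier, jitter float64) time.Duration {
	if retries == 0 {
		return baseDelay
	}
	backoff := math.Min(float64(baseDelay)*math.Pow(multiplier, float64(retries)), float64(maxDelay))
	backoff *= 1 + jitter*(rand.Float64()*2-1)
	if backoff < 0 {
		return 0
	}
	return time.Duration(backoff)
}

func main() {
	// Parameters roughly matching grpc's default backoff config.
	for r := 0; r < 5; r++ {
		fmt.Printf("retry %d -> %v\n", r, jitteredBackoff(r, time.Second, 120*time.Second, 1.6, 0.2))
	}
}
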
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index e8456a77c25..aa4505a871d 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -65,7 +65,7 @@ type TruncatingMethodLogger struct { callID uint64 idWithinCallGen *callIDGenerator - sink Sink // TODO(blog): make this plugable. + sink Sink // TODO(blog): make this pluggable. } // NewTruncatingMethodLogger returns a new truncating method logger. @@ -80,7 +80,7 @@ func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { callID: idGen.next(), idWithinCallGen: &callIDGenerator{}, - sink: DefaultSink, // TODO(blog): make it plugable. + sink: DefaultSink, // TODO(blog): make it pluggable. } } @@ -397,7 +397,7 @@ func metadataKeyOmit(key string) bool { switch key { case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te": return true - case "grpc-trace-bin": // grpc-trace-bin is special because it's visiable to users. + case "grpc-trace-bin": // grpc-trace-bin is special because it's visible to users. return false } return strings.HasPrefix(key, "grpc-") diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 685a3cb41b1..d9064871394 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -28,9 +28,6 @@ import ( var ( // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true) - // AdvertiseCompressors is set if registered compressor should be advertised - // ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false"). - AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true) // RingHashCap indicates the maximum ring size which defaults to 4096 // entries but may be overridden by setting the environment variable // "GRPC_RING_HASH_CAP". This does not override the default bounds @@ -43,6 +40,12 @@ var ( // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS // handshakes that can be performed. ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) + // EnforceALPNEnabled is set if TLS connections to servers with ALPN disabled + // should be rejected. The HTTP/2 protocol requires ALPN to be enabled, this + // option is present for backward compatibility. This option may be overridden + // by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true" + // or "false". + EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", false) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go deleted file mode 100644 index 0126d6b5108..00000000000 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ /dev/null @@ -1,100 +0,0 @@ -//go:build !go1.21 - -// TODO: when this file is deleted (after Go 1.20 support is dropped), delete -// all of grpcrand and call the rand package directly. - -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpcrand implements math/rand functions in a concurrent-safe way -// with a global random source, independent of math/rand's global source. -package grpcrand - -import ( - "math/rand" - "sync" - "time" -) - -var ( - r = rand.New(rand.NewSource(time.Now().UnixNano())) - mu sync.Mutex -) - -// Int implements rand.Int on the grpcrand global source. -func Int() int { - mu.Lock() - defer mu.Unlock() - return r.Int() -} - -// Int63n implements rand.Int63n on the grpcrand global source. -func Int63n(n int64) int64 { - mu.Lock() - defer mu.Unlock() - return r.Int63n(n) -} - -// Intn implements rand.Intn on the grpcrand global source. -func Intn(n int) int { - mu.Lock() - defer mu.Unlock() - return r.Intn(n) -} - -// Int31n implements rand.Int31n on the grpcrand global source. -func Int31n(n int32) int32 { - mu.Lock() - defer mu.Unlock() - return r.Int31n(n) -} - -// Float64 implements rand.Float64 on the grpcrand global source. -func Float64() float64 { - mu.Lock() - defer mu.Unlock() - return r.Float64() -} - -// Uint64 implements rand.Uint64 on the grpcrand global source. -func Uint64() uint64 { - mu.Lock() - defer mu.Unlock() - return r.Uint64() -} - -// Uint32 implements rand.Uint32 on the grpcrand global source. -func Uint32() uint32 { - mu.Lock() - defer mu.Unlock() - return r.Uint32() -} - -// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source. -func ExpFloat64() float64 { - mu.Lock() - defer mu.Unlock() - return r.ExpFloat64() -} - -// Shuffle implements rand.Shuffle on the grpcrand global source. -var Shuffle = func(n int, f func(int, int)) { - mu.Lock() - defer mu.Unlock() - r.Shuffle(n, f) -} diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go deleted file mode 100644 index c37299af1ef..00000000000 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go +++ /dev/null @@ -1,73 +0,0 @@ -//go:build go1.21 - -/* - * - * Copyright 2024 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpcrand implements math/rand functions in a concurrent-safe way -// with a global random source, independent of math/rand's global source. -package grpcrand - -import "math/rand" - -// This implementation will be used for Go version 1.21 or newer. -// For older versions, the original implementation with mutex will be used. - -// Int implements rand.Int on the grpcrand global source. -func Int() int { - return rand.Int() -} - -// Int63n implements rand.Int63n on the grpcrand global source. 
-func Int63n(n int64) int64 { - return rand.Int63n(n) -} - -// Intn implements rand.Intn on the grpcrand global source. -func Intn(n int) int { - return rand.Intn(n) -} - -// Int31n implements rand.Int31n on the grpcrand global source. -func Int31n(n int32) int32 { - return rand.Int31n(n) -} - -// Float64 implements rand.Float64 on the grpcrand global source. -func Float64() float64 { - return rand.Float64() -} - -// Uint64 implements rand.Uint64 on the grpcrand global source. -func Uint64() uint64 { - return rand.Uint64() -} - -// Uint32 implements rand.Uint32 on the grpcrand global source. -func Uint32() uint32 { - return rand.Uint32() -} - -// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source. -func ExpFloat64() float64 { - return rand.ExpFloat64() -} - -// Shuffle implements rand.Shuffle on the grpcrand global source. -var Shuffle = func(n int, f func(int, int)) { - rand.Shuffle(n, f) -} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go index 9f409096798..e8d866984b3 100644 --- a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go +++ b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go @@ -20,8 +20,6 @@ package grpcutil import ( "strings" - - "google.golang.org/grpc/internal/envconfig" ) // RegisteredCompressorNames holds names of the registered compressors. @@ -40,8 +38,5 @@ func IsCompressorNameRegistered(name string) bool { // RegisteredCompressors returns a string of registered compressor names // separated by comma. func RegisteredCompressors() string { - if !envconfig.AdvertiseCompressors { - return "" - } return strings.Join(RegisteredCompressorNames, ",") } diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 48d24bdb4e6..5d665398692 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -106,6 +106,14 @@ var ( // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. ClearGlobalDialOptions func() + + // AddGlobalPerTargetDialOptions adds a PerTargetDialOption that will be + // configured for newly created ClientConns. + AddGlobalPerTargetDialOptions any // func (opt any) + // ClearGlobalPerTargetDialOptions clears the slice of global late apply + // dial options. + ClearGlobalPerTargetDialOptions func() + // JoinDialOptions combines the dial options passed as arguments into a // single dial option. JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption @@ -126,7 +134,8 @@ var ( // deleted or changed. BinaryLogger any // func(binarylog.Logger) grpc.ServerOption - // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a provided grpc.ClientConn + // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a + // provided grpc.ClientConn. SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber) // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using @@ -184,25 +193,25 @@ var ( ChannelzTurnOffForTesting func() - // TriggerXDSResourceNameNotFoundForTesting triggers the resource-not-found - // error for a given resource type and name. This is usually triggered when - // the associated watch timer fires. For testing purposes, having this - // function makes events more predictable than relying on timer events. 
- TriggerXDSResourceNameNotFoundForTesting any // func(func(xdsresource.Type, string), string, string) error + // TriggerXDSResourceNotFoundForTesting causes the provided xDS Client to + // invoke resource-not-found error for the given resource type and name. + TriggerXDSResourceNotFoundForTesting any // func(xdsclient.XDSClient, xdsresource.Type, string) error - // TriggerXDSResourceNameNotFoundClient invokes the testing xDS Client - // singleton to invoke resource not found for a resource type name and - // resource name. - TriggerXDSResourceNameNotFoundClient any // func(string, string) error - - // FromOutgoingContextRaw returns the un-merged, intermediary contents of metadata.rawMD. + // FromOutgoingContextRaw returns the un-merged, intermediary contents of + // metadata.rawMD. FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool) - // UserSetDefaultScheme is set to true if the user has overridden the default resolver scheme. + // UserSetDefaultScheme is set to true if the user has overridden the + // default resolver scheme. UserSetDefaultScheme bool = false + + // ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n + // is the number of elements. swap swaps the elements with indexes i and j. + ShuffleAddressListForTesting any // func(n int, swap func(i, j int)) ) -// HealthChecker defines the signature of the client-side LB channel health checking function. +// HealthChecker defines the signature of the client-side LB channel health +// checking function. // // The implementation is expected to create a health checking RPC stream by // calling newStream(), watch for the health status of serviceName, and report diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index abab35e250e..4552db16b02 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -24,6 +24,7 @@ import ( "context" "encoding/json" "fmt" + "math/rand" "net" "os" "strconv" @@ -35,28 +36,35 @@ import ( "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/resolver/dns/internal" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) -// EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB -// addresses from SRV records. Must not be changed after init time. -var EnableSRVLookups = false +var ( + // EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB + // addresses from SRV records. Must not be changed after init time. + EnableSRVLookups = false -// ResolvingTimeout specifies the maximum duration for a DNS resolution request. -// If the timeout expires before a response is received, the request will be canceled. -// -// It is recommended to set this value at application startup. Avoid modifying this variable -// after initialization as it's not thread-safe for concurrent modification. -var ResolvingTimeout = 30 * time.Second + // MinResolutionInterval is the minimum interval at which re-resolutions are + // allowed. This helps to prevent excessive re-resolution. + MinResolutionInterval = 30 * time.Second -var logger = grpclog.Component("dns") + // ResolvingTimeout specifies the maximum duration for a DNS resolution request. 
+ // If the timeout expires before a response is received, the request will be canceled. + // + // It is recommended to set this value at application startup. Avoid modifying this variable + // after initialization as it's not thread-safe for concurrent modification. + ResolvingTimeout = 30 * time.Second + + logger = grpclog.Component("dns") +) func init() { resolver.Register(NewBuilder()) internal.TimeAfterFunc = time.After + internal.TimeNowFunc = time.Now + internal.TimeUntilFunc = time.Until internal.NewNetResolver = newNetResolver internal.AddressDialer = addressDialer } @@ -203,12 +211,12 @@ func (d *dnsResolver) watcher() { err = d.cc.UpdateState(*state) } - var waitTime time.Duration + var nextResolutionTime time.Time if err == nil { // Success resolving, wait for the next ResolveNow. However, also wait 30 // seconds at the very least to prevent constantly re-resolving. backoffIndex = 1 - waitTime = internal.MinResolutionRate + nextResolutionTime = internal.TimeNowFunc().Add(MinResolutionInterval) select { case <-d.ctx.Done(): return @@ -217,13 +225,13 @@ func (d *dnsResolver) watcher() { } else { // Poll on an error found in DNS Resolver or an error received from // ClientConn. - waitTime = backoff.DefaultExponential.Backoff(backoffIndex) + nextResolutionTime = internal.TimeNowFunc().Add(backoff.DefaultExponential.Backoff(backoffIndex)) backoffIndex++ } select { case <-d.ctx.Done(): return - case <-internal.TimeAfterFunc(waitTime): + case <-internal.TimeAfterFunc(internal.TimeUntilFunc(nextResolutionTime)): } } } @@ -417,7 +425,7 @@ func chosenByPercentage(a *int) bool { if a == nil { return true } - return grpcrand.Intn(100)+1 <= *a + return rand.Intn(100)+1 <= *a } func canaryingSC(js string) string { diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go index c7fc557d00c..c0eae4f5f83 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go @@ -28,7 +28,7 @@ import ( // NetResolver groups the methods on net.Resolver that are used by the DNS // resolver implementation. This allows the default net.Resolver instance to be -// overidden from tests. +// overridden from tests. type NetResolver interface { LookupHost(ctx context.Context, host string) (addrs []string, err error) LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) @@ -50,16 +50,23 @@ var ( // The following vars are overridden from tests. var ( - // MinResolutionRate is the minimum rate at which re-resolutions are - // allowed. This helps to prevent excessive re-resolution. - MinResolutionRate = 30 * time.Second - // TimeAfterFunc is used by the DNS resolver to wait for the given duration - // to elapse. In non-test code, this is implemented by time.After. In test + // to elapse. In non-test code, this is implemented by time.After. In test // code, this can be used to control the amount of time the resolver is // blocked waiting for the duration to elapse. TimeAfterFunc func(time.Duration) <-chan time.Time + // TimeNowFunc is used by the DNS resolver to get the current time. + // In non-test code, this is implemented by time.Now. In test code, + // this can be used to control the current time for the resolver. + TimeNowFunc func() time.Time + + // TimeUntilFunc is used by the DNS resolver to calculate the remaining + // wait time for re-resolution. 
In non-test code, this is implemented by + // time.Until. In test code, this can be used to control the remaining + // time for resolver to wait for re-resolution. + TimeUntilFunc func(time.Time) time.Duration + // NewNetResolver returns the net.Resolver instance for the given target. NewNetResolver func(string) (NetResolver, error) diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 83c3829826a..3deadfb4a20 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -193,7 +193,7 @@ type goAway struct { code http2.ErrCode debugData []byte headsUp bool - closeConn error // if set, loopyWriter will exit, resulting in conn closure + closeConn error // if set, loopyWriter will exit with this error } func (*goAway) isTransportResponseFrame() bool { return false } @@ -336,7 +336,7 @@ func (c *controlBuffer) put(it cbItem) error { return err } -func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, error) { +func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) { var wakeUp bool c.mu.Lock() if c.err != nil { @@ -344,7 +344,7 @@ func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, err return false, c.err } if f != nil { - if !f(it) { // f wasn't successful + if !f() { // f wasn't successful c.mu.Unlock() return false, nil } @@ -495,21 +495,22 @@ type loopyWriter struct { ssGoAwayHandler func(*goAway) (bool, error) } -func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger) *loopyWriter { +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error)) *loopyWriter { var buf bytes.Buffer l := &loopyWriter{ - side: s, - cbuf: cbuf, - sendQuota: defaultWindowSize, - oiws: defaultWindowSize, - estdStreams: make(map[uint32]*outStream), - activeStreams: newOutStreamList(), - framer: fr, - hBuf: &buf, - hEnc: hpack.NewEncoder(&buf), - bdpEst: bdpEst, - conn: conn, - logger: logger, + side: s, + cbuf: cbuf, + sendQuota: defaultWindowSize, + oiws: defaultWindowSize, + estdStreams: make(map[uint32]*outStream), + activeStreams: newOutStreamList(), + framer: fr, + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + bdpEst: bdpEst, + conn: conn, + logger: logger, + ssGoAwayHandler: goAwayHandler, } return l } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index deba0c4d9ef..3c63c706986 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -114,11 +114,11 @@ type http2Client struct { streamQuota int64 streamsQuotaAvailable chan struct{} waitingStreams uint32 - nextID uint32 registeredCompressors string // Do not access controlBuf with mu held. mu sync.Mutex // guard the following variables + nextID uint32 state transportState activeStreams map[uint32]*Stream // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. 
@@ -408,10 +408,10 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts readerErrCh := make(chan error, 1) go t.reader(readerErrCh) defer func() { - if err == nil { - err = <-readerErrCh - } if err != nil { + // writerDone should be closed since the loopy goroutine + // wouldn't have started in the case this function returns an error. + close(t.writerDone) t.Close(err) } }() @@ -458,8 +458,12 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts if err := t.framer.writer.Flush(); err != nil { return nil, err } + // Block until the server preface is received successfully or an error occurs. + if err = <-readerErrCh; err != nil { + return nil, err + } go func() { - t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler) if err := t.loopy.run(); !isIOError(err) { // Immediately close the connection, as the loopy writer returns // when there are no more active streams and we were draining (the @@ -517,6 +521,17 @@ func (t *http2Client) getPeer() *peer.Peer { } } +// OutgoingGoAwayHandler writes a GOAWAY to the connection. Always returns (false, err) as we want the GoAway +// to be the last frame loopy writes to the transport. +func (t *http2Client) outgoingGoAwayHandler(g *goAway) (bool, error) { + t.mu.Lock() + defer t.mu.Unlock() + if err := t.framer.fr.WriteGoAway(t.nextID-2, http2.ErrCodeNo, g.debugData); err != nil { + return false, err + } + return false, g.closeConn +} + func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) { aud := t.createAudience(callHdr) ri := credentials.RequestInfo{ @@ -781,7 +796,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, firstTry := true var ch chan struct{} transportDrainRequired := false - checkForStreamQuota := func(it any) bool { + checkForStreamQuota := func() bool { if t.streamQuota <= 0 { // Can go negative if server decreases it. if firstTry { t.waitingStreams++ @@ -793,23 +808,24 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, t.waitingStreams-- } t.streamQuota-- - h := it.(*headerFrame) - h.streamID = t.nextID - t.nextID += 2 - - // Drain client transport if nextID > MaxStreamID which signals gRPC that - // the connection is closed and a new one must be created for subsequent RPCs. - transportDrainRequired = t.nextID > MaxStreamID - s.id = h.streamID - s.fc = &inFlow{limit: uint32(t.initialWindowSize)} t.mu.Lock() if t.state == draining || t.activeStreams == nil { // Can be niled from Close(). t.mu.Unlock() return false // Don't create a stream if the transport is already closed. } + + hdr.streamID = t.nextID + t.nextID += 2 + // Drain client transport if nextID > MaxStreamID which signals gRPC that + // the connection is closed and a new one must be created for subsequent RPCs. 
+ transportDrainRequired = t.nextID > MaxStreamID + + s.id = hdr.streamID + s.fc = &inFlow{limit: uint32(t.initialWindowSize)} t.activeStreams[s.id] = s t.mu.Unlock() + if t.streamQuota > 0 && t.waitingStreams > 0 { select { case t.streamsQuotaAvailable <- struct{}{}: @@ -819,13 +835,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return true } var hdrListSizeErr error - checkForHeaderListSize := func(it any) bool { + checkForHeaderListSize := func() bool { if t.maxSendHeaderListSize == nil { return true } - hdrFrame := it.(*headerFrame) var sz int64 - for _, f := range hdrFrame.hf { + for _, f := range hdr.hf { if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize) return false @@ -834,8 +849,8 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return true } for { - success, err := t.controlBuf.executeAndPut(func(it any) bool { - return checkForHeaderListSize(it) && checkForStreamQuota(it) + success, err := t.controlBuf.executeAndPut(func() bool { + return checkForHeaderListSize() && checkForStreamQuota() }, hdr) if err != nil { // Connection closed. @@ -946,7 +961,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. rst: rst, rstCode: rstCode, } - addBackStreamQuota := func(any) bool { + addBackStreamQuota := func() bool { t.streamQuota++ if t.streamQuota > 0 && t.waitingStreams > 0 { select { @@ -966,7 +981,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. // Close kicks off the shutdown process of the transport. This should be called // only once on a transport. Once it is called, the transport should not be -// accessed any more. +// accessed anymore. func (t *http2Client) Close(err error) { t.mu.Lock() // Make sure we only close once. @@ -991,7 +1006,10 @@ func (t *http2Client) Close(err error) { t.kpDormancyCond.Signal() } t.mu.Unlock() - t.controlBuf.finish() + // Per HTTP/2 spec, a GOAWAY frame must be sent before closing the + // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte("client transport shutdown"), closeConn: err}) + <-t.writerDone t.cancel() t.conn.Close() channelz.RemoveEntry(t.channelz.ID) @@ -1099,7 +1117,7 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) { // for the transport and the stream based on the current bdp // estimation. 
func (t *http2Client) updateFlowControl(n uint32) { - updateIWS := func(any) bool { + updateIWS := func() bool { t.initialWindowSize = int32(n) t.mu.Lock() for _, s := range t.activeStreams { @@ -1252,7 +1270,7 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { } updateFuncs = append(updateFuncs, updateStreamQuota) } - t.controlBuf.executeAndPut(func(any) bool { + t.controlBuf.executeAndPut(func() bool { for _, f := range updateFuncs { f() } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index d582e047109..b7091165b50 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -25,6 +25,7 @@ import ( "fmt" "io" "math" + "math/rand" "net" "net/http" "strconv" @@ -43,7 +44,6 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -330,8 +330,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.handleSettings(sf) go func() { - t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) - t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler) err := t.loopy.run() close(t.loopyWriterDone) if !isIOError(err) { @@ -860,7 +859,7 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) { } return nil }) - t.controlBuf.executeAndPut(func(any) bool { + t.controlBuf.executeAndPut(func() bool { for _, f := range updateFuncs { f() } @@ -1014,12 +1013,13 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) } headerFields = appendHeaderFieldsFromMD(headerFields, s.header) - success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{ + hf := &headerFrame{ streamID: s.id, hf: headerFields, endStream: false, onWrite: t.setResetPingStrikes, - }) + } + success, err := t.controlBuf.executeAndPut(func() bool { return t.checkForHeaderListSize(hf) }, hf) if !success { if err != nil { return err @@ -1208,7 +1208,7 @@ func (t *http2Server) keepalive() { continue } if outstandingPing && kpTimeoutLeft <= 0 { - t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time)) + t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Timeout)) return } if !outstandingPing { @@ -1440,7 +1440,7 @@ func getJitter(v time.Duration) time.Duration { } // Generate a jitter between +/- 10% of the value. r := int64(v / 10) - j := grpcrand.Int63n(2*r) - r + j := rand.Int63n(2*r) - r return time.Duration(j) } diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 0d2a6e47f67..4b39c0ade97 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -304,7 +304,7 @@ func (s *Stream) isHeaderSent() bool { } // updateHeaderSent updates headerSent and returns true -// if it was alreay set. It is valid only on server-side. +// if it was already set. It is valid only on server-side. 
func (s *Stream) updateHeaderSent() bool { return atomic.SwapUint32(&s.headerSent, 1) == 1 } diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go index a821ff9b2b7..499a49c8c1c 100644 --- a/vendor/google.golang.org/grpc/peer/peer.go +++ b/vendor/google.golang.org/grpc/peer/peer.go @@ -22,7 +22,9 @@ package peer import ( "context" + "fmt" "net" + "strings" "google.golang.org/grpc/credentials" ) @@ -39,6 +41,34 @@ type Peer struct { AuthInfo credentials.AuthInfo } +// String ensures the Peer types implements the Stringer interface in order to +// allow to print a context with a peerKey value effectively. +func (p *Peer) String() string { + if p == nil { + return "Peer" + } + sb := &strings.Builder{} + sb.WriteString("Peer{") + if p.Addr != nil { + fmt.Fprintf(sb, "Addr: '%s', ", p.Addr.String()) + } else { + fmt.Fprintf(sb, "Addr: , ") + } + if p.LocalAddr != nil { + fmt.Fprintf(sb, "LocalAddr: '%s', ", p.LocalAddr.String()) + } else { + fmt.Fprintf(sb, "LocalAddr: , ") + } + if p.AuthInfo != nil { + fmt.Fprintf(sb, "AuthInfo: '%s'", p.AuthInfo.AuthType()) + } else { + fmt.Fprintf(sb, "AuthInfo: ") + } + sb.WriteString("}") + + return sb.String() +} + type peerKey struct{} // NewContext creates a new context with peer information attached. diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index bf56faa76d3..bdaa2130e48 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -20,8 +20,9 @@ package grpc import ( "context" + "fmt" "io" - "sync" + "sync/atomic" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" @@ -32,35 +33,43 @@ import ( "google.golang.org/grpc/status" ) +// pickerGeneration stores a picker and a channel used to signal that a picker +// newer than this one is available. +type pickerGeneration struct { + // picker is the picker produced by the LB policy. May be nil if a picker + // has never been produced. + picker balancer.Picker + // blockingCh is closed when the picker has been invalidated because there + // is a new one available. + blockingCh chan struct{} +} + // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick // actions and unblock when there's a picker update. type pickerWrapper struct { - mu sync.Mutex - done bool - blockingCh chan struct{} - picker balancer.Picker + // If pickerGen holds a nil pointer, the pickerWrapper is closed. + pickerGen atomic.Pointer[pickerGeneration] statsHandlers []stats.Handler // to record blocking picker calls } func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { - return &pickerWrapper{ - blockingCh: make(chan struct{}), + pw := &pickerWrapper{ statsHandlers: statsHandlers, } + pw.pickerGen.Store(&pickerGeneration{ + blockingCh: make(chan struct{}), + }) + return pw } -// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. +// updatePicker is called by UpdateState calls from the LB policy. It +// unblocks all blocked pick. func (pw *pickerWrapper) updatePicker(p balancer.Picker) { - pw.mu.Lock() - if pw.done { - pw.mu.Unlock() - return - } - pw.picker = p - // pw.blockingCh should never be nil. 
- close(pw.blockingCh) - pw.blockingCh = make(chan struct{}) - pw.mu.Unlock() + old := pw.pickerGen.Swap(&pickerGeneration{ + picker: p, + blockingCh: make(chan struct{}), + }) + close(old.blockingCh) } // doneChannelzWrapper performs the following: @@ -97,27 +106,24 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. var lastPickErr error for { - pw.mu.Lock() - if pw.done { - pw.mu.Unlock() + pg := pw.pickerGen.Load() + if pg == nil { return nil, balancer.PickResult{}, ErrClientConnClosing } - - if pw.picker == nil { - ch = pw.blockingCh + if pg.picker == nil { + ch = pg.blockingCh } - if ch == pw.blockingCh { + if ch == pg.blockingCh { // This could happen when either: // - pw.picker is nil (the previous if condition), or - // - has called pick on the current picker. - pw.mu.Unlock() + // - we have already called pick on the current picker. select { case <-ctx.Done(): var errStr string if lastPickErr != nil { errStr = "latest balancer error: " + lastPickErr.Error() } else { - errStr = ctx.Err().Error() + errStr = fmt.Sprintf("received context error while waiting for new LB policy update: %s", ctx.Err().Error()) } switch ctx.Err() { case context.DeadlineExceeded: @@ -144,9 +150,8 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } } - ch = pw.blockingCh - p := pw.picker - pw.mu.Unlock() + ch = pg.blockingCh + p := pg.picker pickResult, err := p.Pick(info) if err != nil { @@ -196,24 +201,15 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } func (pw *pickerWrapper) close() { - pw.mu.Lock() - defer pw.mu.Unlock() - if pw.done { - return - } - pw.done = true - close(pw.blockingCh) + old := pw.pickerGen.Swap(nil) + close(old.blockingCh) } // reset clears the pickerWrapper and prepares it for being used again when idle // mode is exited. func (pw *pickerWrapper) reset() { - pw.mu.Lock() - defer pw.mu.Unlock() - if pw.done { - return - } - pw.blockingCh = make(chan struct{}) + old := pw.pickerGen.Swap(&pickerGeneration{blockingCh: make(chan struct{})}) + close(old.blockingCh) } // dropError is a wrapper error that indicates the LB policy wishes to drop the diff --git a/vendor/google.golang.org/grpc/reflection/README.md b/vendor/google.golang.org/grpc/reflection/README.md new file mode 100644 index 00000000000..9ace83ccb67 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/README.md @@ -0,0 +1,18 @@ +# Reflection + +Package reflection implements server reflection service. + +The service implemented is defined in: https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1/reflection.proto. + +To register server reflection on a gRPC server: +```go +import "google.golang.org/grpc/reflection" + +s := grpc.NewServer() +pb.RegisterYourOwnServer(s, &server{}) + +// Register reflection service on gRPC server. +reflection.Register(s) + +s.Serve(lis) +``` diff --git a/vendor/google.golang.org/grpc/reflection/adapt.go b/vendor/google.golang.org/grpc/reflection/adapt.go new file mode 100644 index 00000000000..6997e474031 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/adapt.go @@ -0,0 +1,57 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package reflection + +import ( + "google.golang.org/grpc/reflection/internal" + + v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" +) + +// asV1Alpha returns an implementation of the v1alpha version of the reflection +// interface that delegates all calls to the given v1 version. +func asV1Alpha(svr v1reflectiongrpc.ServerReflectionServer) v1alphareflectiongrpc.ServerReflectionServer { + return v1AlphaServerImpl{svr: svr} +} + +type v1AlphaServerImpl struct { + svr v1reflectiongrpc.ServerReflectionServer +} + +func (s v1AlphaServerImpl) ServerReflectionInfo(stream v1alphareflectiongrpc.ServerReflection_ServerReflectionInfoServer) error { + return s.svr.ServerReflectionInfo(v1AlphaServerStreamAdapter{stream}) +} + +type v1AlphaServerStreamAdapter struct { + v1alphareflectiongrpc.ServerReflection_ServerReflectionInfoServer +} + +func (s v1AlphaServerStreamAdapter) Send(response *v1reflectionpb.ServerReflectionResponse) error { + return s.ServerReflection_ServerReflectionInfoServer.Send(internal.V1ToV1AlphaResponse(response)) +} + +func (s v1AlphaServerStreamAdapter) Recv() (*v1reflectionpb.ServerReflectionRequest, error) { + resp, err := s.ServerReflection_ServerReflectionInfoServer.Recv() + if err != nil { + return nil, err + } + return internal.V1AlphaToV1Request(resp), nil +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go new file mode 100644 index 00000000000..666eda8e5f3 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go @@ -0,0 +1,953 @@ +// Copyright 2016 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Service exported by server reflection. A more complete description of how +// server reflection works can be found at +// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md +// +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.34.1 +// protoc v4.25.2 +// source: grpc/reflection/v1/reflection.proto + +package grpc_reflection_v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The message sent by the client when calling ServerReflectionInfo method. +type ServerReflectionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + // To use reflection service, the client should set one of the following + // fields in message_request. The server distinguishes requests by their + // defined field and then handles them using corresponding methods. + // + // Types that are assignable to MessageRequest: + // + // *ServerReflectionRequest_FileByFilename + // *ServerReflectionRequest_FileContainingSymbol + // *ServerReflectionRequest_FileContainingExtension + // *ServerReflectionRequest_AllExtensionNumbersOfType + // *ServerReflectionRequest_ListServices + MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"` +} + +func (x *ServerReflectionRequest) Reset() { + *x = ServerReflectionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerReflectionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerReflectionRequest) ProtoMessage() {} + +func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerReflectionRequest.ProtoReflect.Descriptor instead. 
+func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{0} +} + +func (x *ServerReflectionRequest) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest { + if m != nil { + return m.MessageRequest + } + return nil +} + +func (x *ServerReflectionRequest) GetFileByFilename() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok { + return x.FileByFilename + } + return "" +} + +func (x *ServerReflectionRequest) GetFileContainingSymbol() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok { + return x.FileContainingSymbol + } + return "" +} + +func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok { + return x.FileContainingExtension + } + return nil +} + +func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { + return x.AllExtensionNumbersOfType + } + return "" +} + +func (x *ServerReflectionRequest) GetListServices() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok { + return x.ListServices + } + return "" +} + +type isServerReflectionRequest_MessageRequest interface { + isServerReflectionRequest_MessageRequest() +} + +type ServerReflectionRequest_FileByFilename struct { + // Find a proto file by the file name. + FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,proto3,oneof"` +} + +type ServerReflectionRequest_FileContainingSymbol struct { + // Find the proto file that declares the given fully-qualified symbol name. + // This field should be a fully-qualified symbol name + // (e.g. .[.] or .). + FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,proto3,oneof"` +} + +type ServerReflectionRequest_FileContainingExtension struct { + // Find the proto file which defines an extension extending the given + // message type with the given field number. + FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,proto3,oneof"` +} + +type ServerReflectionRequest_AllExtensionNumbersOfType struct { + // Finds the tag numbers used by all known extensions of the given message + // type, and appends them to ExtensionNumberResponse in an undefined order. + // Its corresponding method is best-effort: it's not guaranteed that the + // reflection service will implement this method, and it's not guaranteed + // that this method will provide all extensions. Returns + // StatusCode::UNIMPLEMENTED if it's not implemented. + // This field should be a fully-qualified type name. The format is + // . + AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,proto3,oneof"` +} + +type ServerReflectionRequest_ListServices struct { + // List the full names of registered services. The content will not be + // checked. 
+ ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,proto3,oneof"` +} + +func (*ServerReflectionRequest_FileByFilename) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_FileContainingSymbol) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() { +} + +func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {} + +// The type name and extension number sent by the client when requesting +// file_containing_extension. +type ExtensionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Fully-qualified type name. The format should be . + ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"` + ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` +} + +func (x *ExtensionRequest) Reset() { + *x = ExtensionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionRequest) ProtoMessage() {} + +func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionRequest.ProtoReflect.Descriptor instead. +func (*ExtensionRequest) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{1} +} + +func (x *ExtensionRequest) GetContainingType() string { + if x != nil { + return x.ContainingType + } + return "" +} + +func (x *ExtensionRequest) GetExtensionNumber() int32 { + if x != nil { + return x.ExtensionNumber + } + return 0 +} + +// The message sent by the server to answer ServerReflectionInfo method. +type ServerReflectionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` + OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` + // The server sets one of the following fields according to the message_request + // in the request. 
+ // + // Types that are assignable to MessageResponse: + // + // *ServerReflectionResponse_FileDescriptorResponse + // *ServerReflectionResponse_AllExtensionNumbersResponse + // *ServerReflectionResponse_ListServicesResponse + // *ServerReflectionResponse_ErrorResponse + MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"` +} + +func (x *ServerReflectionResponse) Reset() { + *x = ServerReflectionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerReflectionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerReflectionResponse) ProtoMessage() {} + +func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerReflectionResponse.ProtoReflect.Descriptor instead. +func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{2} +} + +func (x *ServerReflectionResponse) GetValidHost() string { + if x != nil { + return x.ValidHost + } + return "" +} + +func (x *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest { + if x != nil { + return x.OriginalRequest + } + return nil +} + +func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { + if m != nil { + return m.MessageResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { + return x.FileDescriptorResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { + return x.AllExtensionNumbersResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { + return x.ListServicesResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetErrorResponse() *ErrorResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok { + return x.ErrorResponse + } + return nil +} + +type isServerReflectionResponse_MessageResponse interface { + isServerReflectionResponse_MessageResponse() +} + +type ServerReflectionResponse_FileDescriptorResponse struct { + // This message is used to answer file_by_filename, file_containing_symbol, + // file_containing_extension requests with transitive dependencies. + // As the repeated label is not allowed in oneof fields, we use a + // FileDescriptorResponse message to encapsulate the repeated fields. + // The reflection service is allowed to avoid sending FileDescriptorProtos + // that were previously sent in response to earlier requests in the stream. 
+ FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,proto3,oneof"` +} + +type ServerReflectionResponse_AllExtensionNumbersResponse struct { + // This message is used to answer all_extension_numbers_of_type requests. + AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ListServicesResponse struct { + // This message is used to answer list_services requests. + ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ErrorResponse struct { + // This message is used when an error occurs. + ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"` +} + +func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_ListServicesResponse) isServerReflectionResponse_MessageResponse() {} + +func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_MessageResponse() {} + +// Serialized FileDescriptorProto messages sent by the server answering +// a file_by_filename, file_containing_symbol, or file_containing_extension +// request. +type FileDescriptorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Serialized FileDescriptorProto messages. We avoid taking a dependency on + // descriptor.proto, which uses proto2 only features, by making them opaque + // bytes instead. + FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"` +} + +func (x *FileDescriptorResponse) Reset() { + *x = FileDescriptorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileDescriptorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileDescriptorResponse) ProtoMessage() {} + +func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileDescriptorResponse.ProtoReflect.Descriptor instead. +func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{3} +} + +func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { + if x != nil { + return x.FileDescriptorProto + } + return nil +} + +// A list of extension numbers sent by the server answering +// all_extension_numbers_of_type request. +type ExtensionNumberResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Full name of the base type, including the package name. The format + // is . 
+ BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"` + ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` +} + +func (x *ExtensionNumberResponse) Reset() { + *x = ExtensionNumberResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionNumberResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionNumberResponse) ProtoMessage() {} + +func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionNumberResponse.ProtoReflect.Descriptor instead. +func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{4} +} + +func (x *ExtensionNumberResponse) GetBaseTypeName() string { + if x != nil { + return x.BaseTypeName + } + return "" +} + +func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 { + if x != nil { + return x.ExtensionNumber + } + return nil +} + +// A list of ServiceResponse sent by the server answering list_services request. +type ListServiceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The information of each service may be expanded in the future, so we use + // ServiceResponse message to encapsulate it. + Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"` +} + +func (x *ListServiceResponse) Reset() { + *x = ListServiceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListServiceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListServiceResponse) ProtoMessage() {} + +func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListServiceResponse.ProtoReflect.Descriptor instead. +func (*ListServiceResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{5} +} + +func (x *ListServiceResponse) GetService() []*ServiceResponse { + if x != nil { + return x.Service + } + return nil +} + +// The information of a single service used by ListServiceResponse to answer +// list_services request. +type ServiceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Full name of a registered service, including its package name. The format + // is . 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *ServiceResponse) Reset() { + *x = ServiceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceResponse) ProtoMessage() {} + +func (x *ServiceResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceResponse.ProtoReflect.Descriptor instead. +func (*ServiceResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{6} +} + +func (x *ServiceResponse) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The error code and error message sent by the server when an error occurs. +type ErrorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This field uses the error codes defined in grpc::StatusCode. + ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *ErrorResponse) Reset() { + *x = ErrorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorResponse) ProtoMessage() {} + +func (x *ErrorResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead. 
+func (*ErrorResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{7} +} + +func (x *ErrorResponse) GetErrorCode() int32 { + if x != nil { + return x.ErrorCode + } + return 0 +} + +func (x *ErrorResponse) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +var File_grpc_reflection_v1_reflection_proto protoreflect.FileDescriptor + +var file_grpc_reflection_v1_reflection_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x22, 0xf3, 0x02, 0x0a, 0x17, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x66, 0x69, 0x6c, + 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x42, 0x79, 0x46, 0x69, 0x6c, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x62, 0x0a, + 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, + 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x42, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, + 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, + 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, + 0x66, 0x0a, 0x10, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, + 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, + 0x65, 0x78, 0x74, 0x65, 
0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0xae, 0x04, 0x0a, 0x18, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x6f, + 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, + 0x6f, 0x73, 0x74, 0x12, 0x56, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, + 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x66, 0x0a, 0x18, 0x66, + 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x72, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x66, 0x69, 0x6c, + 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x72, 0x0a, 0x1e, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, + 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x1b, 0x61, 0x6c, 0x6c, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5f, 0x0a, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x48, 0x00, 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0e, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x12, 0x0a, 0x10, 
0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4c, 0x0a, 0x16, 0x46, 0x69, 0x6c, 0x65, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0c, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x22, 0x54, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x07, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, + 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, + 0x53, 0x0a, 0x0d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x32, 0x89, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x75, 0x0a, 0x14, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x2b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, + 0x42, 0x66, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x15, 0x53, 
0x65, 0x72, 0x76, 0x65, + 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x34, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, + 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_grpc_reflection_v1_reflection_proto_rawDescOnce sync.Once + file_grpc_reflection_v1_reflection_proto_rawDescData = file_grpc_reflection_v1_reflection_proto_rawDesc +) + +func file_grpc_reflection_v1_reflection_proto_rawDescGZIP() []byte { + file_grpc_reflection_v1_reflection_proto_rawDescOnce.Do(func() { + file_grpc_reflection_v1_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_reflection_v1_reflection_proto_rawDescData) + }) + return file_grpc_reflection_v1_reflection_proto_rawDescData +} + +var file_grpc_reflection_v1_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpc_reflection_v1_reflection_proto_goTypes = []interface{}{ + (*ServerReflectionRequest)(nil), // 0: grpc.reflection.v1.ServerReflectionRequest + (*ExtensionRequest)(nil), // 1: grpc.reflection.v1.ExtensionRequest + (*ServerReflectionResponse)(nil), // 2: grpc.reflection.v1.ServerReflectionResponse + (*FileDescriptorResponse)(nil), // 3: grpc.reflection.v1.FileDescriptorResponse + (*ExtensionNumberResponse)(nil), // 4: grpc.reflection.v1.ExtensionNumberResponse + (*ListServiceResponse)(nil), // 5: grpc.reflection.v1.ListServiceResponse + (*ServiceResponse)(nil), // 6: grpc.reflection.v1.ServiceResponse + (*ErrorResponse)(nil), // 7: grpc.reflection.v1.ErrorResponse +} +var file_grpc_reflection_v1_reflection_proto_depIdxs = []int32{ + 1, // 0: grpc.reflection.v1.ServerReflectionRequest.file_containing_extension:type_name -> grpc.reflection.v1.ExtensionRequest + 0, // 1: grpc.reflection.v1.ServerReflectionResponse.original_request:type_name -> grpc.reflection.v1.ServerReflectionRequest + 3, // 2: grpc.reflection.v1.ServerReflectionResponse.file_descriptor_response:type_name -> grpc.reflection.v1.FileDescriptorResponse + 4, // 3: grpc.reflection.v1.ServerReflectionResponse.all_extension_numbers_response:type_name -> grpc.reflection.v1.ExtensionNumberResponse + 5, // 4: grpc.reflection.v1.ServerReflectionResponse.list_services_response:type_name -> grpc.reflection.v1.ListServiceResponse + 7, // 5: grpc.reflection.v1.ServerReflectionResponse.error_response:type_name -> grpc.reflection.v1.ErrorResponse + 6, // 6: grpc.reflection.v1.ListServiceResponse.service:type_name -> grpc.reflection.v1.ServiceResponse + 0, // 7: grpc.reflection.v1.ServerReflection.ServerReflectionInfo:input_type -> grpc.reflection.v1.ServerReflectionRequest + 2, // 8: grpc.reflection.v1.ServerReflection.ServerReflectionInfo:output_type -> grpc.reflection.v1.ServerReflectionResponse + 8, // [8:9] is the sub-list for method output_type + 7, // [7:8] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { file_grpc_reflection_v1_reflection_proto_init() } +func file_grpc_reflection_v1_reflection_proto_init() { + if File_grpc_reflection_v1_reflection_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + 
file_grpc_reflection_v1_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerReflectionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerReflectionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileDescriptorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionNumberResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ErrorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*ServerReflectionRequest_FileByFilename)(nil), + (*ServerReflectionRequest_FileContainingSymbol)(nil), + (*ServerReflectionRequest_FileContainingExtension)(nil), + (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), + (*ServerReflectionRequest_ListServices)(nil), + } + file_grpc_reflection_v1_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*ServerReflectionResponse_FileDescriptorResponse)(nil), + (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), + (*ServerReflectionResponse_ListServicesResponse)(nil), + (*ServerReflectionResponse_ErrorResponse)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_reflection_v1_reflection_proto_rawDesc, + NumEnums: 0, + NumMessages: 8, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_grpc_reflection_v1_reflection_proto_goTypes, + DependencyIndexes: file_grpc_reflection_v1_reflection_proto_depIdxs, + MessageInfos: file_grpc_reflection_v1_reflection_proto_msgTypes, + }.Build() + File_grpc_reflection_v1_reflection_proto = out.File + file_grpc_reflection_v1_reflection_proto_rawDesc = nil + 
file_grpc_reflection_v1_reflection_proto_goTypes = nil + file_grpc_reflection_v1_reflection_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go new file mode 100644 index 00000000000..17d21fde22a --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go @@ -0,0 +1,165 @@ +// Copyright 2016 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Service exported by server reflection. A more complete description of how +// server reflection works can be found at +// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md +// +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.4.0 +// - protoc v4.25.2 +// source: grpc/reflection/v1/reflection.proto + +package grpc_reflection_v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.62.0 or later. +const _ = grpc.SupportPackageIsVersion8 + +const ( + ServerReflection_ServerReflectionInfo_FullMethodName = "/grpc.reflection.v1.ServerReflection/ServerReflectionInfo" +) + +// ServerReflectionClient is the client API for ServerReflection service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ServerReflectionClient interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) +} + +type serverReflectionClient struct { + cc grpc.ClientConnInterface +} + +func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClient { + return &serverReflectionClient{cc} +} + +func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, cOpts...) 
+ if err != nil { + return nil, err + } + x := &serverReflectionServerReflectionInfoClient{ClientStream: stream} + return x, nil +} + +type ServerReflection_ServerReflectionInfoClient interface { + Send(*ServerReflectionRequest) error + Recv() (*ServerReflectionResponse, error) + grpc.ClientStream +} + +type serverReflectionServerReflectionInfoClient struct { + grpc.ClientStream +} + +func (x *serverReflectionServerReflectionInfoClient) Send(m *ServerReflectionRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionResponse, error) { + m := new(ServerReflectionResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ServerReflectionServer is the server API for ServerReflection service. +// All implementations should embed UnimplementedServerReflectionServer +// for forward compatibility +type ServerReflectionServer interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error +} + +// UnimplementedServerReflectionServer should be embedded to have forward compatible implementations. +type UnimplementedServerReflectionServer struct { +} + +func (UnimplementedServerReflectionServer) ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error { + return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented") +} + +// UnsafeServerReflectionServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ServerReflectionServer will +// result in compilation errors. +type UnsafeServerReflectionServer interface { + mustEmbedUnimplementedServerReflectionServer() +} + +func RegisterServerReflectionServer(s grpc.ServiceRegistrar, srv ServerReflectionServer) { + s.RegisterService(&ServerReflection_ServiceDesc, srv) +} + +func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{ServerStream: stream}) +} + +type ServerReflection_ServerReflectionInfoServer interface { + Send(*ServerReflectionResponse) error + Recv() (*ServerReflectionRequest, error) + grpc.ServerStream +} + +type serverReflectionServerReflectionInfoServer struct { + grpc.ServerStream +} + +func (x *serverReflectionServerReflectionInfoServer) Send(m *ServerReflectionResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRequest, error) { + m := new(ServerReflectionRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ServerReflection_ServiceDesc is the grpc.ServiceDesc for ServerReflection service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ServerReflection_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.reflection.v1.ServerReflection", + HandlerType: (*ServerReflectionServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "ServerReflectionInfo", + Handler: _ServerReflection_ServerReflectionInfo_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc/reflection/v1/reflection.proto", +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go new file mode 100644 index 00000000000..cd032acefca --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -0,0 +1,1028 @@ +// Copyright 2016 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// Service exported by server reflection + +// Warning: this entire file is deprecated. Use this instead: +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.1 +// protoc v4.25.2 +// grpc/reflection/v1alpha/reflection.proto is a deprecated file. + +package grpc_reflection_v1alpha + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The message sent by the client when calling ServerReflectionInfo method. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +type ServerReflectionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + // To use reflection service, the client should set one of the following + // fields in message_request. The server distinguishes requests by their + // defined field and then handles them using corresponding methods. 
+ // + // Types that are assignable to MessageRequest: + // + // *ServerReflectionRequest_FileByFilename + // *ServerReflectionRequest_FileContainingSymbol + // *ServerReflectionRequest_FileContainingExtension + // *ServerReflectionRequest_AllExtensionNumbersOfType + // *ServerReflectionRequest_ListServices + MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"` +} + +func (x *ServerReflectionRequest) Reset() { + *x = ServerReflectionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerReflectionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerReflectionRequest) ProtoMessage() {} + +func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerReflectionRequest.ProtoReflect.Descriptor instead. +func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{0} +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionRequest) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest { + if m != nil { + return m.MessageRequest + } + return nil +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionRequest) GetFileByFilename() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok { + return x.FileByFilename + } + return "" +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionRequest) GetFileContainingSymbol() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok { + return x.FileContainingSymbol + } + return "" +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok { + return x.FileContainingExtension + } + return nil +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { + return x.AllExtensionNumbersOfType + } + return "" +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
+func (x *ServerReflectionRequest) GetListServices() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok { + return x.ListServices + } + return "" +} + +type isServerReflectionRequest_MessageRequest interface { + isServerReflectionRequest_MessageRequest() +} + +type ServerReflectionRequest_FileByFilename struct { + // Find a proto file by the file name. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,proto3,oneof"` +} + +type ServerReflectionRequest_FileContainingSymbol struct { + // Find the proto file that declares the given fully-qualified symbol name. + // This field should be a fully-qualified symbol name + // (e.g. <package>.<service>[.<method>] or <package>.<type>). + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,proto3,oneof"` +} + +type ServerReflectionRequest_FileContainingExtension struct { + // Find the proto file which defines an extension extending the given + // message type with the given field number. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,proto3,oneof"` +} + +type ServerReflectionRequest_AllExtensionNumbersOfType struct { + // Finds the tag numbers used by all known extensions of extendee_type, and + // appends them to ExtensionNumberResponse in an undefined order. + // Its corresponding method is best-effort: it's not guaranteed that the + // reflection service will implement this method, and it's not guaranteed + // that this method will provide all extensions. Returns + // StatusCode::UNIMPLEMENTED if it's not implemented. + // This field should be a fully-qualified type name. The format is + // <package>.<type> + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,proto3,oneof"` +} + +type ServerReflectionRequest_ListServices struct { + // List the full names of registered services. The content will not be + // checked. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,proto3,oneof"` +} + +func (*ServerReflectionRequest_FileByFilename) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_FileContainingSymbol) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() { +} + +func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {} + +// The type name and extension number sent by the client when requesting +// file_containing_extension. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +type ExtensionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Fully-qualified type name. 
The format should be <package>.<type> + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"` + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` +} + +func (x *ExtensionRequest) Reset() { + *x = ExtensionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionRequest) ProtoMessage() {} + +func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionRequest.ProtoReflect.Descriptor instead. +func (*ExtensionRequest) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{1} +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ExtensionRequest) GetContainingType() string { + if x != nil { + return x.ContainingType + } + return "" +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ExtensionRequest) GetExtensionNumber() int32 { + if x != nil { + return x.ExtensionNumber + } + return 0 +} + +// The message sent by the server to answer ServerReflectionInfo method. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +type ServerReflectionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` + // The server set one of the following fields according to the message_request + // in the request. 
+ // + // Types that are assignable to MessageResponse: + // + // *ServerReflectionResponse_FileDescriptorResponse + // *ServerReflectionResponse_AllExtensionNumbersResponse + // *ServerReflectionResponse_ListServicesResponse + // *ServerReflectionResponse_ErrorResponse + MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"` +} + +func (x *ServerReflectionResponse) Reset() { + *x = ServerReflectionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerReflectionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerReflectionResponse) ProtoMessage() {} + +func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerReflectionResponse.ProtoReflect.Descriptor instead. +func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{2} +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionResponse) GetValidHost() string { + if x != nil { + return x.ValidHost + } + return "" +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest { + if x != nil { + return x.OriginalRequest + } + return nil +} + +func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { + if m != nil { + return m.MessageResponse + } + return nil +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { + return x.FileDescriptorResponse + } + return nil +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { + return x.AllExtensionNumbersResponse + } + return nil +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { + return x.ListServicesResponse + } + return nil +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
+func (x *ServerReflectionResponse) GetErrorResponse() *ErrorResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok { + return x.ErrorResponse + } + return nil +} + +type isServerReflectionResponse_MessageResponse interface { + isServerReflectionResponse_MessageResponse() +} + +type ServerReflectionResponse_FileDescriptorResponse struct { + // This message is used to answer file_by_filename, file_containing_symbol, + // file_containing_extension requests with transitive dependencies. As + // the repeated label is not allowed in oneof fields, we use a + // FileDescriptorResponse message to encapsulate the repeated fields. + // The reflection service is allowed to avoid sending FileDescriptorProtos + // that were previously sent in response to earlier requests in the stream. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,proto3,oneof"` +} + +type ServerReflectionResponse_AllExtensionNumbersResponse struct { + // This message is used to answer all_extension_numbers_of_type requst. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ListServicesResponse struct { + // This message is used to answer list_services request. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ErrorResponse struct { + // This message is used when an error occurs. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"` +} + +func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_ListServicesResponse) isServerReflectionResponse_MessageResponse() {} + +func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_MessageResponse() {} + +// Serialized FileDescriptorProto messages sent by the server answering +// a file_by_filename, file_containing_symbol, or file_containing_extension +// request. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +type FileDescriptorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Serialized FileDescriptorProto messages. We avoid taking a dependency on + // descriptor.proto, which uses proto2 only features, by making them opaque + // bytes instead. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
+ FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"` +} + +func (x *FileDescriptorResponse) Reset() { + *x = FileDescriptorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileDescriptorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileDescriptorResponse) ProtoMessage() {} + +func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileDescriptorResponse.ProtoReflect.Descriptor instead. +func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{3} +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { + if x != nil { + return x.FileDescriptorProto + } + return nil +} + +// A list of extension numbers sent by the server answering +// all_extension_numbers_of_type request. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +type ExtensionNumberResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Full name of the base type, including the package name. The format + // is <package>.<type> + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"` + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` +} + +func (x *ExtensionNumberResponse) Reset() { + *x = ExtensionNumberResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionNumberResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionNumberResponse) ProtoMessage() {} + +func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionNumberResponse.ProtoReflect.Descriptor instead. +func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{4} +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
+func (x *ExtensionNumberResponse) GetBaseTypeName() string { + if x != nil { + return x.BaseTypeName + } + return "" +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 { + if x != nil { + return x.ExtensionNumber + } + return nil +} + +// A list of ServiceResponse sent by the server answering list_services request. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +type ListServiceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The information of each service may be expanded in the future, so we use + // ServiceResponse message to encapsulate it. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"` +} + +func (x *ListServiceResponse) Reset() { + *x = ListServiceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListServiceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListServiceResponse) ProtoMessage() {} + +func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListServiceResponse.ProtoReflect.Descriptor instead. +func (*ListServiceResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{5} +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ListServiceResponse) GetService() []*ServiceResponse { + if x != nil { + return x.Service + } + return nil +} + +// The information of a single service used by ListServiceResponse to answer +// list_services request. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +type ServiceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Full name of a registered service, including its package name. The format + // is <package>.<service> + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *ServiceResponse) Reset() { + *x = ServiceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceResponse) ProtoMessage() {} + +func (x *ServiceResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceResponse.ProtoReflect.Descriptor instead. +func (*ServiceResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{6} +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServiceResponse) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The error code and error message sent by the server when an error occurs. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +type ErrorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This field uses the error codes defined in grpc::StatusCode. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *ErrorResponse) Reset() { + *x = ErrorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorResponse) ProtoMessage() {} + +func (x *ErrorResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead. +func (*ErrorResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{7} +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ErrorResponse) GetErrorCode() int32 { + if x != nil { + return x.ErrorCode + } + return 0 +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
+func (x *ErrorResponse) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +var File_grpc_reflection_v1alpha_reflection_proto protoreflect.FileDescriptor + +var file_grpc_reflection_v1alpha_reflection_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x22, 0xf8, 0x02, 0x0a, 0x17, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, + 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, + 0x6f, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, + 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x42, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x36, 0x0a, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, + 0x6e, 0x67, 0x5f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x14, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, + 0x67, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x67, 0x0a, 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x42, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, 0x66, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x6c, + 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x66, + 0x0a, 0x10, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 
0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0xc7, 0x04, 0x0a, 0x18, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x6f, 0x73, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x6f, + 0x73, 0x74, 0x12, 0x5b, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, + 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x6b, 0x0a, 0x18, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x65, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x77, 0x0a, 0x1e, + 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x1b, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x64, 0x0a, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4f, 0x0a, 0x0e, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x72, + 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 
0x12, 0x0a, 0x10, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x4c, 0x0a, 0x16, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x66, 0x69, + 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, + 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x61, 0x73, + 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x59, 0x0a, 0x13, 0x4c, 0x69, + 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x42, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x53, 0x0a, 0x0d, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, + 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0d, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x32, 0x93, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7f, 0x0a, 0x14, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x30, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x73, 0x0a, 0x1a, 0x69, 0x6f, 0x2e, 0x67, 0x72, + 0x70, 
0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x42, 0x15, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, + 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0xb8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_grpc_reflection_v1alpha_reflection_proto_rawDescOnce sync.Once + file_grpc_reflection_v1alpha_reflection_proto_rawDescData = file_grpc_reflection_v1alpha_reflection_proto_rawDesc +) + +func file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP() []byte { + file_grpc_reflection_v1alpha_reflection_proto_rawDescOnce.Do(func() { + file_grpc_reflection_v1alpha_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_reflection_v1alpha_reflection_proto_rawDescData) + }) + return file_grpc_reflection_v1alpha_reflection_proto_rawDescData +} + +var file_grpc_reflection_v1alpha_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpc_reflection_v1alpha_reflection_proto_goTypes = []interface{}{ + (*ServerReflectionRequest)(nil), // 0: grpc.reflection.v1alpha.ServerReflectionRequest + (*ExtensionRequest)(nil), // 1: grpc.reflection.v1alpha.ExtensionRequest + (*ServerReflectionResponse)(nil), // 2: grpc.reflection.v1alpha.ServerReflectionResponse + (*FileDescriptorResponse)(nil), // 3: grpc.reflection.v1alpha.FileDescriptorResponse + (*ExtensionNumberResponse)(nil), // 4: grpc.reflection.v1alpha.ExtensionNumberResponse + (*ListServiceResponse)(nil), // 5: grpc.reflection.v1alpha.ListServiceResponse + (*ServiceResponse)(nil), // 6: grpc.reflection.v1alpha.ServiceResponse + (*ErrorResponse)(nil), // 7: grpc.reflection.v1alpha.ErrorResponse +} +var file_grpc_reflection_v1alpha_reflection_proto_depIdxs = []int32{ + 1, // 0: grpc.reflection.v1alpha.ServerReflectionRequest.file_containing_extension:type_name -> grpc.reflection.v1alpha.ExtensionRequest + 0, // 1: grpc.reflection.v1alpha.ServerReflectionResponse.original_request:type_name -> grpc.reflection.v1alpha.ServerReflectionRequest + 3, // 2: grpc.reflection.v1alpha.ServerReflectionResponse.file_descriptor_response:type_name -> grpc.reflection.v1alpha.FileDescriptorResponse + 4, // 3: grpc.reflection.v1alpha.ServerReflectionResponse.all_extension_numbers_response:type_name -> grpc.reflection.v1alpha.ExtensionNumberResponse + 5, // 4: grpc.reflection.v1alpha.ServerReflectionResponse.list_services_response:type_name -> grpc.reflection.v1alpha.ListServiceResponse + 7, // 5: grpc.reflection.v1alpha.ServerReflectionResponse.error_response:type_name -> grpc.reflection.v1alpha.ErrorResponse + 6, // 6: grpc.reflection.v1alpha.ListServiceResponse.service:type_name -> grpc.reflection.v1alpha.ServiceResponse + 0, // 7: grpc.reflection.v1alpha.ServerReflection.ServerReflectionInfo:input_type -> grpc.reflection.v1alpha.ServerReflectionRequest + 2, // 8: grpc.reflection.v1alpha.ServerReflection.ServerReflectionInfo:output_type -> grpc.reflection.v1alpha.ServerReflectionResponse + 8, // [8:9] is the sub-list for method output_type + 7, // [7:8] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension 
type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { file_grpc_reflection_v1alpha_reflection_proto_init() } +func file_grpc_reflection_v1alpha_reflection_proto_init() { + if File_grpc_reflection_v1alpha_reflection_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerReflectionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerReflectionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileDescriptorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionNumberResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ErrorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*ServerReflectionRequest_FileByFilename)(nil), + (*ServerReflectionRequest_FileContainingSymbol)(nil), + (*ServerReflectionRequest_FileContainingExtension)(nil), + (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), + (*ServerReflectionRequest_ListServices)(nil), + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*ServerReflectionResponse_FileDescriptorResponse)(nil), + (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), + (*ServerReflectionResponse_ListServicesResponse)(nil), + (*ServerReflectionResponse_ErrorResponse)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_reflection_v1alpha_reflection_proto_rawDesc, + NumEnums: 0, + 
NumMessages: 8, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_grpc_reflection_v1alpha_reflection_proto_goTypes, + DependencyIndexes: file_grpc_reflection_v1alpha_reflection_proto_depIdxs, + MessageInfos: file_grpc_reflection_v1alpha_reflection_proto_msgTypes, + }.Build() + File_grpc_reflection_v1alpha_reflection_proto = out.File + file_grpc_reflection_v1alpha_reflection_proto_rawDesc = nil + file_grpc_reflection_v1alpha_reflection_proto_goTypes = nil + file_grpc_reflection_v1alpha_reflection_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go new file mode 100644 index 00000000000..93886e38216 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go @@ -0,0 +1,162 @@ +// Copyright 2016 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// Service exported by server reflection + +// Warning: this entire file is deprecated. Use this instead: +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.4.0 +// - protoc v4.25.2 +// grpc/reflection/v1alpha/reflection.proto is a deprecated file. + +package grpc_reflection_v1alpha + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.62.0 or later. +const _ = grpc.SupportPackageIsVersion8 + +const ( + ServerReflection_ServerReflectionInfo_FullMethodName = "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo" +) + +// ServerReflectionClient is the client API for ServerReflection service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ServerReflectionClient interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) +} + +type serverReflectionClient struct { + cc grpc.ClientConnInterface +} + +func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClient { + return &serverReflectionClient{cc} +} + +func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, cOpts...) 
+ if err != nil { + return nil, err + } + x := &serverReflectionServerReflectionInfoClient{ClientStream: stream} + return x, nil +} + +type ServerReflection_ServerReflectionInfoClient interface { + Send(*ServerReflectionRequest) error + Recv() (*ServerReflectionResponse, error) + grpc.ClientStream +} + +type serverReflectionServerReflectionInfoClient struct { + grpc.ClientStream +} + +func (x *serverReflectionServerReflectionInfoClient) Send(m *ServerReflectionRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionResponse, error) { + m := new(ServerReflectionResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ServerReflectionServer is the server API for ServerReflection service. +// All implementations should embed UnimplementedServerReflectionServer +// for forward compatibility +type ServerReflectionServer interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error +} + +// UnimplementedServerReflectionServer should be embedded to have forward compatible implementations. +type UnimplementedServerReflectionServer struct { +} + +func (UnimplementedServerReflectionServer) ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error { + return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented") +} + +// UnsafeServerReflectionServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ServerReflectionServer will +// result in compilation errors. +type UnsafeServerReflectionServer interface { + mustEmbedUnimplementedServerReflectionServer() +} + +func RegisterServerReflectionServer(s grpc.ServiceRegistrar, srv ServerReflectionServer) { + s.RegisterService(&ServerReflection_ServiceDesc, srv) +} + +func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{ServerStream: stream}) +} + +type ServerReflection_ServerReflectionInfoServer interface { + Send(*ServerReflectionResponse) error + Recv() (*ServerReflectionRequest, error) + grpc.ServerStream +} + +type serverReflectionServerReflectionInfoServer struct { + grpc.ServerStream +} + +func (x *serverReflectionServerReflectionInfoServer) Send(m *ServerReflectionResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRequest, error) { + m := new(ServerReflectionRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ServerReflection_ServiceDesc is the grpc.ServiceDesc for ServerReflection service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ServerReflection_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.reflection.v1alpha.ServerReflection", + HandlerType: (*ServerReflectionServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "ServerReflectionInfo", + Handler: _ServerReflection_ServerReflectionInfo_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc/reflection/v1alpha/reflection.proto", +} diff --git a/vendor/google.golang.org/grpc/reflection/internal/internal.go b/vendor/google.golang.org/grpc/reflection/internal/internal.go new file mode 100644 index 00000000000..36ee6507507 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/internal/internal.go @@ -0,0 +1,436 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains code that is shared by both reflection package and +// the test package. The packages are split in this way inorder to avoid +// depenedency to deprecated package github.com/golang/protobuf. +package internal + +import ( + "io" + "sort" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + v1alphareflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" +) + +// ServiceInfoProvider is an interface used to retrieve metadata about the +// services to expose. +type ServiceInfoProvider interface { + GetServiceInfo() map[string]grpc.ServiceInfo +} + +// ExtensionResolver is the interface used to query details about extensions. +// This interface is satisfied by protoregistry.GlobalTypes. +type ExtensionResolver interface { + protoregistry.ExtensionTypeResolver + RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool) +} + +// ServerReflectionServer is the server API for ServerReflection service. +type ServerReflectionServer struct { + v1alphareflectiongrpc.UnimplementedServerReflectionServer + S ServiceInfoProvider + DescResolver protodesc.Resolver + ExtResolver ExtensionResolver +} + +// FileDescWithDependencies returns a slice of serialized fileDescriptors in +// wire format ([]byte). The fileDescriptors will include fd and all the +// transitive dependencies of fd with names not in sentFileDescriptors. 
+func (s *ServerReflectionServer) FileDescWithDependencies(fd protoreflect.FileDescriptor, sentFileDescriptors map[string]bool) ([][]byte, error) { + if fd.IsPlaceholder() { + // If the given root file is a placeholder, treat it + // as missing instead of serializing it. + return nil, protoregistry.NotFound + } + var r [][]byte + queue := []protoreflect.FileDescriptor{fd} + for len(queue) > 0 { + currentfd := queue[0] + queue = queue[1:] + if currentfd.IsPlaceholder() { + // Skip any missing files in the dependency graph. + continue + } + if sent := sentFileDescriptors[currentfd.Path()]; len(r) == 0 || !sent { + sentFileDescriptors[currentfd.Path()] = true + fdProto := protodesc.ToFileDescriptorProto(currentfd) + currentfdEncoded, err := proto.Marshal(fdProto) + if err != nil { + return nil, err + } + r = append(r, currentfdEncoded) + } + for i := 0; i < currentfd.Imports().Len(); i++ { + queue = append(queue, currentfd.Imports().Get(i)) + } + } + return r, nil +} + +// FileDescEncodingContainingSymbol finds the file descriptor containing the +// given symbol, finds all of its previously unsent transitive dependencies, +// does marshalling on them, and returns the marshalled result. The given symbol +// can be a type, a service or a method. +func (s *ServerReflectionServer) FileDescEncodingContainingSymbol(name string, sentFileDescriptors map[string]bool) ([][]byte, error) { + d, err := s.DescResolver.FindDescriptorByName(protoreflect.FullName(name)) + if err != nil { + return nil, err + } + return s.FileDescWithDependencies(d.ParentFile(), sentFileDescriptors) +} + +// FileDescEncodingContainingExtension finds the file descriptor containing +// given extension, finds all of its previously unsent transitive dependencies, +// does marshalling on them, and returns the marshalled result. +func (s *ServerReflectionServer) FileDescEncodingContainingExtension(typeName string, extNum int32, sentFileDescriptors map[string]bool) ([][]byte, error) { + xt, err := s.ExtResolver.FindExtensionByNumber(protoreflect.FullName(typeName), protoreflect.FieldNumber(extNum)) + if err != nil { + return nil, err + } + return s.FileDescWithDependencies(xt.TypeDescriptor().ParentFile(), sentFileDescriptors) +} + +// AllExtensionNumbersForTypeName returns all extension numbers for the given type. +func (s *ServerReflectionServer) AllExtensionNumbersForTypeName(name string) ([]int32, error) { + var numbers []int32 + s.ExtResolver.RangeExtensionsByMessage(protoreflect.FullName(name), func(xt protoreflect.ExtensionType) bool { + numbers = append(numbers, int32(xt.TypeDescriptor().Number())) + return true + }) + sort.Slice(numbers, func(i, j int) bool { + return numbers[i] < numbers[j] + }) + if len(numbers) == 0 { + // maybe return an error if given type name is not known + if _, err := s.DescResolver.FindDescriptorByName(protoreflect.FullName(name)); err != nil { + return nil, err + } + } + return numbers, nil +} + +// ListServices returns the names of services this server exposes. +func (s *ServerReflectionServer) ListServices() []*v1reflectionpb.ServiceResponse { + serviceInfo := s.S.GetServiceInfo() + resp := make([]*v1reflectionpb.ServiceResponse, 0, len(serviceInfo)) + for svc := range serviceInfo { + resp = append(resp, &v1reflectionpb.ServiceResponse{Name: svc}) + } + sort.Slice(resp, func(i, j int) bool { + return resp[i].Name < resp[j].Name + }) + return resp +} + +// ServerReflectionInfo is the reflection service handler. 
+func (s *ServerReflectionServer) ServerReflectionInfo(stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoServer) error { + sentFileDescriptors := make(map[string]bool) + for { + in, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + out := &v1reflectionpb.ServerReflectionResponse{ + ValidHost: in.Host, + OriginalRequest: in, + } + switch req := in.MessageRequest.(type) { + case *v1reflectionpb.ServerReflectionRequest_FileByFilename: + var b [][]byte + fd, err := s.DescResolver.FindFileByPath(req.FileByFilename) + if err == nil { + b, err = s.FileDescWithDependencies(fd, sentFileDescriptors) + } + if err != nil { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, + } + } + case *v1reflectionpb.ServerReflectionRequest_FileContainingSymbol: + b, err := s.FileDescEncodingContainingSymbol(req.FileContainingSymbol, sentFileDescriptors) + if err != nil { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, + } + } + case *v1reflectionpb.ServerReflectionRequest_FileContainingExtension: + typeName := req.FileContainingExtension.ContainingType + extNum := req.FileContainingExtension.ExtensionNumber + b, err := s.FileDescEncodingContainingExtension(typeName, extNum, sentFileDescriptors) + if err != nil { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, + } + } + case *v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: + extNums, err := s.AllExtensionNumbersForTypeName(req.AllExtensionNumbersOfType) + if err != nil { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &v1reflectionpb.ExtensionNumberResponse{ + BaseTypeName: req.AllExtensionNumbersOfType, + ExtensionNumber: extNums, + }, + } + } + case *v1reflectionpb.ServerReflectionRequest_ListServices: + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &v1reflectionpb.ListServiceResponse{ + Service: s.ListServices(), + }, + } + default: + return status.Errorf(codes.InvalidArgument, "invalid MessageRequest: %v", in.MessageRequest) + } + + if err := stream.Send(out); err != nil { + return err + } + } +} + +// V1ToV1AlphaResponse converts a v1 ServerReflectionResponse to a v1alpha. 
+func V1ToV1AlphaResponse(v1 *v1reflectionpb.ServerReflectionResponse) *v1alphareflectionpb.ServerReflectionResponse { + var v1alpha v1alphareflectionpb.ServerReflectionResponse + v1alpha.ValidHost = v1.ValidHost + if v1.OriginalRequest != nil { + v1alpha.OriginalRequest = V1ToV1AlphaRequest(v1.OriginalRequest) + } + switch mr := v1.MessageResponse.(type) { + case *v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse: + if mr != nil { + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1alphareflectionpb.FileDescriptorResponse{ + FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(), + }, + } + } + case *v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse: + if mr != nil { + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &v1alphareflectionpb.ExtensionNumberResponse{ + BaseTypeName: mr.AllExtensionNumbersResponse.GetBaseTypeName(), + ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(), + }, + } + } + case *v1reflectionpb.ServerReflectionResponse_ListServicesResponse: + if mr != nil { + svcs := make([]*v1alphareflectionpb.ServiceResponse, len(mr.ListServicesResponse.GetService())) + for i, svc := range mr.ListServicesResponse.GetService() { + svcs[i] = &v1alphareflectionpb.ServiceResponse{ + Name: svc.GetName(), + } + } + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &v1alphareflectionpb.ListServiceResponse{ + Service: svcs, + }, + } + } + case *v1reflectionpb.ServerReflectionResponse_ErrorResponse: + if mr != nil { + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1alphareflectionpb.ErrorResponse{ + ErrorCode: mr.ErrorResponse.GetErrorCode(), + ErrorMessage: mr.ErrorResponse.GetErrorMessage(), + }, + } + } + default: + // no value set + } + return &v1alpha +} + +// V1AlphaToV1Request converts a v1alpha ServerReflectionRequest to a v1. 
+func V1AlphaToV1Request(v1alpha *v1alphareflectionpb.ServerReflectionRequest) *v1reflectionpb.ServerReflectionRequest { + var v1 v1reflectionpb.ServerReflectionRequest + v1.Host = v1alpha.Host + switch mr := v1alpha.MessageRequest.(type) { + case *v1alphareflectionpb.ServerReflectionRequest_FileByFilename: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileByFilename{ + FileByFilename: mr.FileByFilename, + } + case *v1alphareflectionpb.ServerReflectionRequest_FileContainingSymbol: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileContainingSymbol{ + FileContainingSymbol: mr.FileContainingSymbol, + } + case *v1alphareflectionpb.ServerReflectionRequest_FileContainingExtension: + if mr.FileContainingExtension != nil { + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &v1reflectionpb.ExtensionRequest{ + ContainingType: mr.FileContainingExtension.GetContainingType(), + ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), + }, + } + } + case *v1alphareflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{ + AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, + } + case *v1alphareflectionpb.ServerReflectionRequest_ListServices: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_ListServices{ + ListServices: mr.ListServices, + } + default: + // no value set + } + return &v1 +} + +// V1ToV1AlphaRequest converts a v1 ServerReflectionRequest to a v1alpha. +func V1ToV1AlphaRequest(v1 *v1reflectionpb.ServerReflectionRequest) *v1alphareflectionpb.ServerReflectionRequest { + var v1alpha v1alphareflectionpb.ServerReflectionRequest + v1alpha.Host = v1.Host + switch mr := v1.MessageRequest.(type) { + case *v1reflectionpb.ServerReflectionRequest_FileByFilename: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileByFilename{ + FileByFilename: mr.FileByFilename, + } + } + case *v1reflectionpb.ServerReflectionRequest_FileContainingSymbol: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileContainingSymbol{ + FileContainingSymbol: mr.FileContainingSymbol, + } + } + case *v1reflectionpb.ServerReflectionRequest_FileContainingExtension: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &v1alphareflectionpb.ExtensionRequest{ + ContainingType: mr.FileContainingExtension.GetContainingType(), + ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), + }, + } + } + case *v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{ + AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, + } + } + case *v1reflectionpb.ServerReflectionRequest_ListServices: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_ListServices{ + ListServices: mr.ListServices, + } + } + default: + // no value set + } + return &v1alpha +} + +// V1AlphaToV1Response converts a v1alpha ServerReflectionResponse to a v1. 
+func V1AlphaToV1Response(v1alpha *v1alphareflectionpb.ServerReflectionResponse) *v1reflectionpb.ServerReflectionResponse { + var v1 v1reflectionpb.ServerReflectionResponse + v1.ValidHost = v1alpha.ValidHost + if v1alpha.OriginalRequest != nil { + v1.OriginalRequest = V1AlphaToV1Request(v1alpha.OriginalRequest) + } + switch mr := v1alpha.MessageResponse.(type) { + case *v1alphareflectionpb.ServerReflectionResponse_FileDescriptorResponse: + if mr != nil { + v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{ + FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(), + }, + } + } + case *v1alphareflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse: + if mr != nil { + v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &v1reflectionpb.ExtensionNumberResponse{ + BaseTypeName: mr.AllExtensionNumbersResponse.GetBaseTypeName(), + ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(), + }, + } + } + case *v1alphareflectionpb.ServerReflectionResponse_ListServicesResponse: + if mr != nil { + svcs := make([]*v1reflectionpb.ServiceResponse, len(mr.ListServicesResponse.GetService())) + for i, svc := range mr.ListServicesResponse.GetService() { + svcs[i] = &v1reflectionpb.ServiceResponse{ + Name: svc.GetName(), + } + } + v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &v1reflectionpb.ListServiceResponse{ + Service: svcs, + }, + } + } + case *v1alphareflectionpb.ServerReflectionResponse_ErrorResponse: + if mr != nil { + v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ + ErrorCode: mr.ErrorResponse.GetErrorCode(), + ErrorMessage: mr.ErrorResponse.GetErrorMessage(), + }, + } + } + default: + // no value set + } + return &v1 +} diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection.go b/vendor/google.golang.org/grpc/reflection/serverreflection.go new file mode 100644 index 00000000000..13a94e2dd2e --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/serverreflection.go @@ -0,0 +1,160 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* +Package reflection implements server reflection service. + +The service implemented is defined in: +https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto. + +To register server reflection on a gRPC server: + + import "google.golang.org/grpc/reflection" + + s := grpc.NewServer() + pb.RegisterYourOwnServer(s, &server{}) + + // Register reflection service on gRPC server. 
+ reflection.Register(s) + + s.Serve(lis) +*/ +package reflection // import "google.golang.org/grpc/reflection" + +import ( + "google.golang.org/grpc" + "google.golang.org/grpc/reflection/internal" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" +) + +// GRPCServer is the interface provided by a gRPC server. It is implemented by +// *grpc.Server, but could also be implemented by other concrete types. It acts +// as a registry, for accumulating the services exposed by the server. +type GRPCServer interface { + grpc.ServiceRegistrar + ServiceInfoProvider +} + +var _ GRPCServer = (*grpc.Server)(nil) + +// Register registers the server reflection service on the given gRPC server. +// Both the v1 and v1alpha versions are registered. +func Register(s GRPCServer) { + svr := NewServerV1(ServerOptions{Services: s}) + v1alphareflectiongrpc.RegisterServerReflectionServer(s, asV1Alpha(svr)) + v1reflectiongrpc.RegisterServerReflectionServer(s, svr) +} + +// RegisterV1 registers only the v1 version of the server reflection service +// on the given gRPC server. Many clients may only support v1alpha so most +// users should use Register instead, at least until clients have upgraded. +func RegisterV1(s GRPCServer) { + svr := NewServerV1(ServerOptions{Services: s}) + v1reflectiongrpc.RegisterServerReflectionServer(s, svr) +} + +// ServiceInfoProvider is an interface used to retrieve metadata about the +// services to expose. +// +// The reflection service is only interested in the service names, but the +// signature is this way so that *grpc.Server implements it. So it is okay +// for a custom implementation to return zero values for the +// grpc.ServiceInfo values in the map. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ServiceInfoProvider interface { + GetServiceInfo() map[string]grpc.ServiceInfo +} + +// ExtensionResolver is the interface used to query details about extensions. +// This interface is satisfied by protoregistry.GlobalTypes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ExtensionResolver interface { + protoregistry.ExtensionTypeResolver + RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool) +} + +// ServerOptions represents the options used to construct a reflection server. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ServerOptions struct { + // The source of advertised RPC services. If not specified, the reflection + // server will report an empty list when asked to list services. + // + // This value will typically be a *grpc.Server. But the set of advertised + // services can be customized by wrapping a *grpc.Server or using an + // alternate implementation that returns a custom set of service names. + Services ServiceInfoProvider + // Optional resolver used to load descriptors. If not specified, + // protoregistry.GlobalFiles will be used. + DescriptorResolver protodesc.Resolver + // Optional resolver used to query for known extensions. If not specified, + // protoregistry.GlobalTypes will be used. 
+ ExtensionResolver ExtensionResolver +} + +// NewServer returns a reflection server implementation using the given options. +// This can be used to customize behavior of the reflection service. Most usages +// should prefer to use Register instead. For backwards compatibility reasons, +// this returns the v1alpha version of the reflection server. For a v1 version +// of the reflection server, see NewServerV1. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewServer(opts ServerOptions) v1alphareflectiongrpc.ServerReflectionServer { + return asV1Alpha(NewServerV1(opts)) +} + +// NewServerV1 returns a reflection server implementation using the given options. +// This can be used to customize behavior of the reflection service. Most usages +// should prefer to use Register instead. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewServerV1(opts ServerOptions) v1reflectiongrpc.ServerReflectionServer { + if opts.DescriptorResolver == nil { + opts.DescriptorResolver = protoregistry.GlobalFiles + } + if opts.ExtensionResolver == nil { + opts.ExtensionResolver = protoregistry.GlobalTypes + } + return &internal.ServerReflectionServer{ + S: opts.Services, + DescResolver: opts.DescriptorResolver, + ExtResolver: opts.ExtensionResolver, + } +} diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh index a6f26c8ab0f..3edca296c22 100644 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -63,7 +63,7 @@ LEGACY_SOURCES=( # Generates only the new gRPC Service symbols SOURCES=( - $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^\(profiling/proto/service.proto\|reflection/grpc_reflection_v1alpha/reflection.proto\)$') + $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^profiling/proto/service.proto$') ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto @@ -93,7 +93,7 @@ Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing for src in ${SOURCES[@]}; do echo "protoc ${src}" - protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS}:${WORKDIR}/out \ + protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},use_generic_streams_experimental=true:${WORKDIR}/out \ -I"." \ -I${WORKDIR}/grpc-proto \ -I${WORKDIR}/googleapis \ @@ -118,6 +118,6 @@ mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/ # grpc_testing_not_regenerate/*.pb.go are not re-generated, # see grpc_testing_not_regenerate/README.md for details. -rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go +rm ${WORKDIR}/out/google.golang.org/grpc/reflection/test/grpc_testing_not_regenerate/*.pb.go cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go index b54a3a3225d..ef3d6ed6c43 100644 --- a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go @@ -18,9 +18,6 @@ // Package dns implements a dns resolver to be installed as the default resolver // in grpc. -// -// Deprecated: this package is imported by grpc and should not need to be -// imported directly by users. 
package dns import ( @@ -52,3 +49,12 @@ func SetResolvingTimeout(timeout time.Duration) { func NewBuilder() resolver.Builder { return dns.NewBuilder() } + +// SetMinResolutionInterval sets the default minimum interval at which DNS +// re-resolutions are allowed. This helps to prevent excessive re-resolution. +// +// It must be called only at application startup, before any gRPC calls are +// made. Modifying this value after initialization is not thread-safe. +func SetMinResolutionInterval(d time.Duration) { + dns.MinResolutionInterval = d +} diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go index 9dcc9780f89..c5fb45236fa 100644 --- a/vendor/google.golang.org/grpc/resolver_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -171,7 +171,7 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { // ParseServiceConfig is called by resolver implementations to parse a JSON // representation of the service config. func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { - return parseServiceConfig(scJSON) + return parseServiceConfig(scJSON, ccr.cc.dopts.maxCallAttempts) } // addChannelzTraceEvent adds a channelz trace event containing the new diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 998e251ddc4..fdd49e6e915 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -964,7 +964,7 @@ func setCallInfoCodec(c *callInfo) error { // The SupportPackageIsVersion variables are referenced from generated protocol // buffer files to ensure compatibility with the gRPC version used. The latest -// support package version is 7. +// support package version is 9. // // Older versions are kept for compatibility. // @@ -976,6 +976,7 @@ const ( SupportPackageIsVersion6 = true SupportPackageIsVersion7 = true SupportPackageIsVersion8 = true + SupportPackageIsVersion9 = true ) const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index fd4558daa52..89f8e4792bf 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -527,12 +527,22 @@ func ConnectionTimeout(d time.Duration) ServerOption { }) } +// MaxHeaderListSizeServerOption is a ServerOption that sets the max +// (uncompressed) size of header list that the server is prepared to accept. +type MaxHeaderListSizeServerOption struct { + MaxHeaderListSize uint32 +} + +func (o MaxHeaderListSizeServerOption) apply(so *serverOptions) { + so.maxHeaderListSize = &o.MaxHeaderListSize +} + // MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size // of header list that the server is prepared to accept. 
func MaxHeaderListSize(s uint32) ServerOption { - return newFuncServerOption(func(o *serverOptions) { - o.maxHeaderListSize = &s - }) + return MaxHeaderListSizeServerOption{ + MaxHeaderListSize: s, + } } // HeaderTableSize returns a ServerOption that sets the size of dynamic diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 2b35c5d2130..2671c5ef69f 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -26,6 +26,7 @@ import ( "time" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/pickfirst" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancer/gracefulswitch" @@ -163,16 +164,18 @@ type jsonSC struct { } func init() { - internal.ParseServiceConfig = parseServiceConfig + internal.ParseServiceConfig = func(js string) *serviceconfig.ParseResult { + return parseServiceConfig(js, defaultMaxCallAttempts) + } } -func parseServiceConfig(js string) *serviceconfig.ParseResult { +func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult { if len(js) == 0 { return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")} } var rsc jsonSC err := json.Unmarshal([]byte(js), &rsc) if err != nil { - logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) + logger.Warningf("grpc: unmarshalling service config %s: %v", js, err) return &serviceconfig.ParseResult{Err: err} } sc := ServiceConfig{ @@ -183,12 +186,12 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { } c := rsc.LoadBalancingConfig if c == nil { - name := PickFirstBalancerName + name := pickfirst.Name if rsc.LoadBalancingPolicy != nil { name = *rsc.LoadBalancingPolicy } if balancer.Get(name) == nil { - name = PickFirstBalancerName + name = pickfirst.Name } cfg := []map[string]any{{name: struct{}{}}} strCfg, err := json.Marshal(cfg) @@ -218,8 +221,8 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { WaitForReady: m.WaitForReady, Timeout: (*time.Duration)(m.Timeout), } - if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { - logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) + if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy, maxAttempts); err != nil { + logger.Warningf("grpc: unmarshalling service config %s: %v", js, err) return &serviceconfig.ParseResult{Err: err} } if m.MaxRequestMessageBytes != nil { @@ -239,13 +242,13 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { for i, n := range *m.Name { path, err := n.generatePath() if err != nil { - logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err) + logger.Warningf("grpc: error unmarshalling service config %s due to methodConfig[%d]: %v", js, i, err) return &serviceconfig.ParseResult{Err: err} } if _, ok := paths[path]; ok { err = errDuplicatedName - logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err) + logger.Warningf("grpc: error unmarshalling service config %s due to methodConfig[%d]: %v", js, i, err) return &serviceconfig.ParseResult{Err: err} } paths[path] = struct{}{} @@ -264,7 +267,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { return &serviceconfig.ParseResult{Config: &sc} } -func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPolicy, err error) { +func convertRetryPolicy(jrp *jsonRetryPolicy, 
maxAttempts int) (p *internalserviceconfig.RetryPolicy, err error) { if jrp == nil { return nil, nil } @@ -278,17 +281,16 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol return nil, nil } + if jrp.MaxAttempts < maxAttempts { + maxAttempts = jrp.MaxAttempts + } rp := &internalserviceconfig.RetryPolicy{ - MaxAttempts: jrp.MaxAttempts, + MaxAttempts: maxAttempts, InitialBackoff: time.Duration(jrp.InitialBackoff), MaxBackoff: time.Duration(jrp.MaxBackoff), BackoffMultiplier: jrp.BackoffMultiplier, RetryableStatusCodes: make(map[codes.Code]bool), } - if rp.MaxAttempts > 5 { - // TODO(retry): Make the max maxAttempts configurable. - rp.MaxAttempts = 5 - } for _, code := range jrp.RetryableStatusCodes { rp.RetryableStatusCodes[code] = true } diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index 4ab70e2d462..fdb0bd65182 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -73,9 +73,12 @@ func (*PickerUpdated) isRPCStats() {} type InPayload struct { // Client is true if this InPayload is from client side. Client bool - // Payload is the payload with original type. + // Payload is the payload with original type. This may be modified after + // the call to HandleRPC which provides the InPayload returns and must be + // copied if needed later. Payload any // Data is the serialized message payload. + // Deprecated: Data will be removed in the next release. Data []byte // Length is the size of the uncompressed payload data. Does not include any @@ -143,9 +146,12 @@ func (s *InTrailer) isRPCStats() {} type OutPayload struct { // Client is true if this OutPayload is from client side. Client bool - // Payload is the payload with original type. + // Payload is the payload with original type. This may be modified after + // the call to HandleRPC which provides the OutPayload returns and must be + // copied if needed later. Payload any // Data is the serialized message payload. + // Deprecated: Data will be removed in the next release. Data []byte // Length is the size of the uncompressed payload data. Does not include any // framing (gRPC or HTTP/2). 
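The stats.go hunk above tightens the contract for custom stats handlers: Payload may be reused by gRPC after HandleRPC returns, and the deprecated Data field is slated for removal. Below is a minimal sketch of a handler that copes with this change, assuming a hypothetical payloadLogger type and its kept slice (neither appears in this patch); it copies retained messages with proto.Clone instead of holding on to Payload or Data.

package main

import (
	"context"
	"log"

	"google.golang.org/grpc/stats"
	"google.golang.org/protobuf/proto"
)

// payloadLogger is a hypothetical stats.Handler that wants to keep message
// payloads after HandleRPC returns. Because Payload may be modified or reused
// by gRPC once HandleRPC returns (and Data is deprecated), it stores a copy
// made with proto.Clone rather than the original value.
type payloadLogger struct {
	kept []proto.Message // retained copies, for illustration only
}

func (h *payloadLogger) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context {
	return ctx
}

func (h *payloadLogger) HandleRPC(_ context.Context, s stats.RPCStats) {
	switch p := s.(type) {
	case *stats.InPayload:
		if m, ok := p.Payload.(proto.Message); ok {
			h.kept = append(h.kept, proto.Clone(m)) // copy; do not retain p.Payload or p.Data
		}
	case *stats.OutPayload:
		if m, ok := p.Payload.(proto.Message); ok {
			h.kept = append(h.kept, proto.Clone(m))
		}
	}
}

func (h *payloadLogger) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
	return ctx
}

func (h *payloadLogger) HandleConn(_ context.Context, _ stats.ConnStats) {}

func main() {
	// Wiring it into a server would look roughly like:
	//   s := grpc.NewServer(grpc.StatsHandler(&payloadLogger{}))
	log.Println("payloadLogger sketch; see the comment above for server wiring")
}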
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index d939ffc6348..8051ef5b514 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -23,6 +23,7 @@ import ( "errors" "io" "math" + "math/rand" "strconv" "sync" "time" @@ -34,7 +35,6 @@ import ( "google.golang.org/grpc/internal/balancerload" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" iresolver "google.golang.org/grpc/internal/resolver" @@ -516,6 +516,7 @@ func (a *csAttempt) newStream() error { return toRPCErr(nse.Err) } a.s = s + a.ctx = s.Context() a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool} return nil } @@ -698,7 +699,7 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) { if max := float64(rp.MaxBackoff); cur > max { cur = max } - dur = time.Duration(grpcrand.Int63n(int64(cur))) + dur = time.Duration(rand.Int63n(int64(cur))) cs.numRetriesSincePushback++ } diff --git a/vendor/google.golang.org/grpc/stream_interfaces.go b/vendor/google.golang.org/grpc/stream_interfaces.go new file mode 100644 index 00000000000..8b813529c0c --- /dev/null +++ b/vendor/google.golang.org/grpc/stream_interfaces.go @@ -0,0 +1,152 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +// ServerStreamingClient represents the client side of a server-streaming (one +// request, many responses) RPC. It is generic over the type of the response +// message. It is used in generated code. +type ServerStreamingClient[Res any] interface { + Recv() (*Res, error) + ClientStream +} + +// ServerStreamingServer represents the server side of a server-streaming (one +// request, many responses) RPC. It is generic over the type of the response +// message. It is used in generated code. +type ServerStreamingServer[Res any] interface { + Send(*Res) error + ServerStream +} + +// ClientStreamingClient represents the client side of a client-streaming (many +// requests, one response) RPC. It is generic over both the type of the request +// message stream and the type of the unary response message. It is used in +// generated code. +type ClientStreamingClient[Req any, Res any] interface { + Send(*Req) error + CloseAndRecv() (*Res, error) + ClientStream +} + +// ClientStreamingServer represents the server side of a client-streaming (many +// requests, one response) RPC. It is generic over both the type of the request +// message stream and the type of the unary response message. It is used in +// generated code. +type ClientStreamingServer[Req any, Res any] interface { + Recv() (*Req, error) + SendAndClose(*Res) error + ServerStream +} + +// BidiStreamingClient represents the client side of a bidirectional-streaming +// (many requests, many responses) RPC. 
It is generic over both the type of the +// request message stream and the type of the response message stream. It is +// used in generated code. +type BidiStreamingClient[Req any, Res any] interface { + Send(*Req) error + Recv() (*Res, error) + ClientStream +} + +// BidiStreamingServer represents the server side of a bidirectional-streaming +// (many requests, many responses) RPC. It is generic over both the type of the +// request message stream and the type of the response message stream. It is +// used in generated code. +type BidiStreamingServer[Req any, Res any] interface { + Recv() (*Req, error) + Send(*Res) error + ServerStream +} + +// GenericClientStream implements the ServerStreamingClient, ClientStreamingClient, +// and BidiStreamingClient interfaces. It is used in generated code. +type GenericClientStream[Req any, Res any] struct { + ClientStream +} + +var _ ServerStreamingClient[string] = (*GenericClientStream[int, string])(nil) +var _ ClientStreamingClient[int, string] = (*GenericClientStream[int, string])(nil) +var _ BidiStreamingClient[int, string] = (*GenericClientStream[int, string])(nil) + +// Send pushes one message into the stream of requests to be consumed by the +// server. The type of message which can be sent is determined by the Req type +// parameter of the GenericClientStream receiver. +func (x *GenericClientStream[Req, Res]) Send(m *Req) error { + return x.ClientStream.SendMsg(m) +} + +// Recv reads one message from the stream of responses generated by the server. +// The type of the message returned is determined by the Res type parameter +// of the GenericClientStream receiver. +func (x *GenericClientStream[Req, Res]) Recv() (*Res, error) { + m := new(Res) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// CloseAndRecv closes the sending side of the stream, then receives the unary +// response from the server. The type of message which it returns is determined +// by the Res type parameter of the GenericClientStream receiver. +func (x *GenericClientStream[Req, Res]) CloseAndRecv() (*Res, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(Res) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// GenericServerStream implements the ServerStreamingServer, ClientStreamingServer, +// and BidiStreamingServer interfaces. It is used in generated code. +type GenericServerStream[Req any, Res any] struct { + ServerStream +} + +var _ ServerStreamingServer[string] = (*GenericServerStream[int, string])(nil) +var _ ClientStreamingServer[int, string] = (*GenericServerStream[int, string])(nil) +var _ BidiStreamingServer[int, string] = (*GenericServerStream[int, string])(nil) + +// Send pushes one message into the stream of responses to be consumed by the +// client. The type of message which can be sent is determined by the Res +// type parameter of the serverStreamServer receiver. +func (x *GenericServerStream[Req, Res]) Send(m *Res) error { + return x.ServerStream.SendMsg(m) +} + +// SendAndClose pushes the unary response to the client. The type of message +// which can be sent is determined by the Res type parameter of the +// clientStreamServer receiver. +func (x *GenericServerStream[Req, Res]) SendAndClose(m *Res) error { + return x.ServerStream.SendMsg(m) +} + +// Recv reads one message from the stream of requests generated by the client. 
+// The type of the message returned is determined by the Req type parameter +// of the clientStreamServer receiver. +func (x *GenericServerStream[Req, Res]) Recv() (*Req, error) { + m := new(Req) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 2556f758386..bafaef99be9 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.63.2" +const Version = "1.65.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh deleted file mode 100644 index 7e6b92e491a..00000000000 --- a/vendor/google.golang.org/grpc/vet.sh +++ /dev/null @@ -1,195 +0,0 @@ -#!/bin/bash - -set -ex # Exit on error; debugging enabled. -set -o pipefail # Fail a pipe if any sub-command fails. - -# not makes sure the command passed to it does not exit with a return code of 0. -not() { - # This is required instead of the earlier (! $COMMAND) because subshells and - # pipefail don't work the same on Darwin as in Linux. - ! "$@" -} - -die() { - echo "$@" >&2 - exit 1 -} - -fail_on_output() { - tee /dev/stderr | not read -} - -# Check to make sure it's safe to modify the user's git repo. -git status --porcelain | fail_on_output - -# Undo any edits made by this script. -cleanup() { - git reset --hard HEAD -} -trap cleanup EXIT - -PATH="${HOME}/go/bin:${GOROOT}/bin:${PATH}" -go version - -if [[ "$1" = "-install" ]]; then - # Install the pinned versions as defined in module tools. - pushd ./test/tools - go install \ - golang.org/x/tools/cmd/goimports \ - honnef.co/go/tools/cmd/staticcheck \ - github.com/client9/misspell/cmd/misspell - popd - if [[ -z "${VET_SKIP_PROTO}" ]]; then - if [[ "${GITHUB_ACTIONS}" = "true" ]]; then - PROTOBUF_VERSION=25.2 # a.k.a. v4.22.0 in pb.go files. - PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip - pushd /home/runner/go - wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} - unzip ${PROTOC_FILENAME} - bin/protoc --version - popd - elif not which protoc > /dev/null; then - die "Please install protoc into your path" - fi - fi - exit 0 -elif [[ "$#" -ne 0 ]]; then - die "Unknown argument(s): $*" -fi - -# - Check that generated proto files are up to date. -if [[ -z "${VET_SKIP_PROTO}" ]]; then - make proto && git status --porcelain 2>&1 | fail_on_output || \ - (git status; git --no-pager diff; exit 1) -fi - -if [[ -n "${VET_ONLY_PROTO}" ]]; then - exit 0 -fi - -# - Ensure all source files contain a copyright message. -# (Done in two parts because Darwin "git grep" has broken support for compound -# exclusion matches.) -(grep -L "DO NOT EDIT" $(git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)" -- '*.go') || true) | fail_on_output - -# - Make sure all tests in grpc and grpc/test use leakcheck via Teardown. -not grep 'func Test[^(]' *_test.go -not grep 'func Test[^(]' test/*.go - -# - Check for typos in test function names -git grep 'func (s) ' -- "*_test.go" | not grep -v 'func (s) Test' -git grep 'func [A-Z]' -- "*_test.go" | not grep -v 'func Test\|Benchmark\|Example' - -# - Do not import x/net/context. -not git grep -l 'x/net/context' -- "*.go" - -# - Do not use time.After except in tests. It has the potential to leak the -# timer since there is no way to stop it early. 
-git grep -l 'time.After(' -- "*.go" | not grep -v '_test.go\|test_utils\|testutils' - -# - Do not import math/rand for real library code. Use internal/grpcrand for -# thread safety. -git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^interop/stress\|grpcrand\|^benchmark\|wrr_test' - -# - Do not use "interface{}"; use "any" instead. -git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc\|grpc_testing_not_regenerate' - -# - Do not call grpclog directly. Use grpclog.Component instead. -git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' - -# - Ensure all ptypes proto packages are renamed when importing. -not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" - -# - Ensure all usages of grpc_testing package are renamed when importing. -not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go" - -# - Ensure all xds proto imports are renamed to *pb or *grpc. -git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' - -misspell -error . - -# - gofmt, goimports, go vet, go mod tidy. -# Perform these checks on each module inside gRPC. -for MOD_FILE in $(find . -name 'go.mod'); do - MOD_DIR=$(dirname ${MOD_FILE}) - pushd ${MOD_DIR} - go vet -all ./... | fail_on_output - gofmt -s -d -l . 2>&1 | fail_on_output - goimports -l . 2>&1 | not grep -vE "\.pb\.go" - - go mod tidy -compat=1.19 - git status --porcelain 2>&1 | fail_on_output || \ - (git status; git --no-pager diff; exit 1) - popd -done - -# - Collection of static analysis checks -SC_OUT="$(mktemp)" -staticcheck -go 1.19 -checks 'all' ./... > "${SC_OUT}" || true - -# Error for anything other than checks that need exclusions. -grep -v "(ST1000)" "${SC_OUT}" | grep -v "(SA1019)" | grep -v "(ST1003)" | not grep -v "(ST1019)\|\(other import of\)" - -# Exclude underscore checks for generated code. -grep "(ST1003)" "${SC_OUT}" | not grep -v '\(.pb.go:\)\|\(code_string_test.go:\)\|\(grpc_testing_not_regenerate\)' - -# Error for duplicate imports not including grpc protos. -grep "(ST1019)\|\(other import of\)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused -channelz/grpc_channelz_v1" -go-control-plane/envoy -grpclb/grpc_lb_v1" -health/grpc_health_v1" -interop/grpc_testing" -orca/v3" -proto/grpc_gcp" -proto/grpc_lookup_v1" -reflection/grpc_reflection_v1" -reflection/grpc_reflection_v1alpha" -XXXXX PleaseIgnoreUnused' - -# Error for any package comments not in generated code. -grep "(ST1000)" "${SC_OUT}" | not grep -v "\.pb\.go:" - -# Only ignore the following deprecated types/fields/functions and exclude -# generated code. -grep "(SA1019)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused -XXXXX Protobuf related deprecation errors: -"github.com/golang/protobuf -.pb.go: -grpc_testing_not_regenerate -: ptypes. -proto.RegisterType -XXXXX gRPC internal usage deprecation errors: -"google.golang.org/grpc -: grpc. -: v1alpha. -: v1alphareflectionpb. -BalancerAttributes is deprecated: -CredsBundle is deprecated: -Metadata is deprecated: use Attributes instead. -NewSubConn is deprecated: -OverrideServerName is deprecated: -RemoveSubConn is deprecated: -SecurityVersion is deprecated: -Target is deprecated: Use the Target field in the BuildOptions instead. 
-UpdateAddresses is deprecated: -UpdateSubConnState is deprecated: -balancer.ErrTransientFailure is deprecated: -grpc/reflection/v1alpha/reflection.proto -SwitchTo is deprecated: -XXXXX xDS deprecated fields we support -.ExactMatch -.PrefixMatch -.SafeRegexMatch -.SuffixMatch -GetContainsMatch -GetExactMatch -GetMatchSubjectAltNames -GetPrefixMatch -GetSafeRegexMatch -GetSuffixMatch -GetTlsCertificateCertificateProviderInstance -GetValidationContextCertificateProviderInstance -XXXXX PleaseIgnoreUnused' - -echo SUCCESS diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/reflect.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/reflect.go index 0048beb1e32..4e9c9aea75d 100644 --- a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/reflect.go +++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/reflect.go @@ -132,7 +132,7 @@ func genReflectFileDescriptor(gen *protogen.Plugin, g *protogen.GeneratedFile, f panic("too many dependencies") // sanity check } - g.P("var ", goTypesVarName(f), " = []interface{}{") + g.P("var ", goTypesVarName(f), " = []any{") for _, s := range goTypes { g.P(s) } @@ -170,7 +170,7 @@ func genReflectFileDescriptor(gen *protogen.Plugin, g *protogen.GeneratedFile, f idx := f.allMessagesByPtr[message] typesVar := messageTypesVarName(f) - g.P(typesVar, "[", idx, "].Exporter = func(v interface{}, i int) interface{} {") + g.P(typesVar, "[", idx, "].Exporter = func(v any, i int) any {") g.P("switch v := v.(*", message.GoIdent, "); i {") for i := 0; i < sf.count; i++ { if name := sf.unexported[i]; name != "" { @@ -191,7 +191,7 @@ func genReflectFileDescriptor(gen *protogen.Plugin, g *protogen.GeneratedFile, f typesVar := messageTypesVarName(f) // Associate the wrapper types by directly passing them to the MessageInfo. - g.P(typesVar, "[", idx, "].OneofWrappers = []interface{} {") + g.P(typesVar, "[", idx, "].OneofWrappers = []any {") for _, oneof := range message.Oneofs { if !oneof.Desc.IsSynthetic() { for _, field := range oneof.Fields { diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/well_known_types.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/well_known_types.go index 47c4fa18f95..9a7b59030b7 100644 --- a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/well_known_types.go +++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/well_known_types.go @@ -213,11 +213,11 @@ func genPackageKnownComment(f *fileInfo) protogen.Comments { The standard Go "encoding/json" package has functionality to serialize arbitrary types to a large degree. The Value.AsInterface, Struct.AsMap, and ListValue.AsSlice methods can convert the protobuf message representation into - a form represented by interface{}, map[string]interface{}, and []interface{}. + a form represented by any, map[string]any, and []any. This form can be used with other packages that operate on such data structures and also directly with the standard json package. - In order to convert the interface{}, map[string]interface{}, and []interface{} + In order to convert the any, map[string]any, and []any forms back as Value, Struct, and ListValue messages, use the NewStruct, NewList, and NewValue constructor functions. 
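As the regenerated structpb package comment above notes, the any-based forms (map[string]any, []any) round-trip through the well-known Struct and Value messages via NewStruct/NewValue and AsMap/AsInterface. A minimal sketch of that round trip using the structpb package named in the comment; the map contents below are illustrative only, not taken from this patch:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// NewStruct accepts a general-purpose map[string]any; each value is
	// converted with NewValue, so only JSON-compatible kinds are allowed.
	s, err := structpb.NewStruct(map[string]any{
		"name":    "example",
		"enabled": true,
		"tags":    []any{"a", "b"},
	})
	if err != nil {
		panic(err)
	}

	// AsMap converts the Struct back to map[string]any by calling
	// Value.AsInterface on each field.
	m := s.AsMap()
	fmt.Println(m["name"], m["enabled"], m["tags"])
}

Nested lists come back as []any and numbers as float64, matching the JSON mapping documented further down in this generated comment.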
@@ -252,28 +252,28 @@ func genPackageKnownComment(f *fileInfo) protogen.Comments { To construct a Value message representing the above JSON object: - m, err := structpb.NewValue(map[string]interface{}{ + m, err := structpb.NewValue(map[string]any{ "firstName": "John", "lastName": "Smith", "isAlive": true, "age": 27, - "address": map[string]interface{}{ + "address": map[string]any{ "streetAddress": "21 2nd Street", "city": "New York", "state": "NY", "postalCode": "10021-3100", }, - "phoneNumbers": []interface{}{ - map[string]interface{}{ + "phoneNumbers": []any{ + map[string]any{ "type": "home", "number": "212 555-1234", }, - map[string]interface{}{ + map[string]any{ "type": "office", "number": "646 555-4567", }, }, - "children": []interface{}{}, + "children": []any{}, "spouse": nil, }) if err != nil { @@ -634,7 +634,7 @@ func genMessageKnownFunctions(g *protogen.GeneratedFile, f *fileInfo, m *message g.P("// NewStruct constructs a Struct from a general-purpose Go map.") g.P("// The map keys must be valid UTF-8.") g.P("// The map values are converted using NewValue.") - g.P("func NewStruct(v map[string]interface{}) (*Struct, error) {") + g.P("func NewStruct(v map[string]any) (*Struct, error) {") g.P(" x := &Struct{Fields: make(map[string]*Value, len(v))}") g.P(" for k, v := range v {") g.P(" if !", utf8Package.Ident("ValidString"), "(k) {") @@ -652,9 +652,9 @@ func genMessageKnownFunctions(g *protogen.GeneratedFile, f *fileInfo, m *message g.P("// AsMap converts x to a general-purpose Go map.") g.P("// The map values are converted by calling Value.AsInterface.") - g.P("func (x *Struct) AsMap() map[string]interface{} {") + g.P("func (x *Struct) AsMap() map[string]any {") g.P(" f := x.GetFields()") - g.P(" vs := make(map[string]interface{}, len(f))") + g.P(" vs := make(map[string]any, len(f))") g.P(" for k, v := range f {") g.P(" vs[k] = v.AsInterface()") g.P(" }") @@ -675,7 +675,7 @@ func genMessageKnownFunctions(g *protogen.GeneratedFile, f *fileInfo, m *message case genid.ListValue_message_fullname: g.P("// NewList constructs a ListValue from a general-purpose Go slice.") g.P("// The slice elements are converted using NewValue.") - g.P("func NewList(v []interface{}) (*ListValue, error) {") + g.P("func NewList(v []any) (*ListValue, error) {") g.P(" x := &ListValue{Values: make([]*Value, len(v))}") g.P(" for i, v := range v {") g.P(" var err error") @@ -690,9 +690,9 @@ func genMessageKnownFunctions(g *protogen.GeneratedFile, f *fileInfo, m *message g.P("// AsSlice converts x to a general-purpose Go slice.") g.P("// The slice elements are converted by calling Value.AsInterface.") - g.P("func (x *ListValue) AsSlice() []interface{} {") + g.P("func (x *ListValue) AsSlice() []any {") g.P(" vals := x.GetValues()") - g.P(" vs := make([]interface{}, len(vals))") + g.P(" vs := make([]any, len(vals))") g.P(" for i, v := range vals {") g.P(" vs[i] = v.AsInterface()") g.P(" }") @@ -723,13 +723,13 @@ func genMessageKnownFunctions(g *protogen.GeneratedFile, f *fileInfo, m *message g.P("// ║ float32, float64 │ stored as NumberValue ║") g.P("// ║ string │ stored as StringValue; must be valid UTF-8 ║") g.P("// ║ []byte │ stored as StringValue; base64-encoded ║") - g.P("// ║ map[string]interface{} │ stored as StructValue ║") - g.P("// ║ []interface{} │ stored as ListValue ║") + g.P("// ║ map[string]any │ stored as StructValue ║") + g.P("// ║ []any │ stored as ListValue ║") g.P("// ╚════════════════════════╧════════════════════════════════════════════╝") g.P("//") g.P("// When converting an int64 or uint64 to 
a NumberValue, numeric precision loss") g.P("// is possible since they are stored as a float64.") - g.P("func NewValue(v interface{}) (*Value, error) {") + g.P("func NewValue(v any) (*Value, error) {") g.P(" switch v := v.(type) {") g.P(" case nil:") g.P(" return NewNullValue(), nil") @@ -759,13 +759,13 @@ func genMessageKnownFunctions(g *protogen.GeneratedFile, f *fileInfo, m *message g.P(" case []byte:") g.P(" s := ", base64Package.Ident("StdEncoding"), ".EncodeToString(v)") g.P(" return NewStringValue(s), nil") - g.P(" case map[string]interface{}:") + g.P(" case map[string]any:") g.P(" v2, err := NewStruct(v)") g.P(" if err != nil {") g.P(" return nil, err") g.P(" }") g.P(" return NewStructValue(v2), nil") - g.P(" case []interface{}:") + g.P(" case []any:") g.P(" v2, err := NewList(v)") g.P(" if err != nil {") g.P(" return nil, err") @@ -820,7 +820,7 @@ func genMessageKnownFunctions(g *protogen.GeneratedFile, f *fileInfo, m *message g.P("//") g.P("// Floating-point values (i.e., \"NaN\", \"Infinity\", and \"-Infinity\") are") g.P("// converted as strings to remain compatible with MarshalJSON.") - g.P("func (x *Value) AsInterface() interface{} {") + g.P("func (x *Value) AsInterface() any {") g.P(" switch v := x.GetKind().(type) {") g.P(" case *Value_NumberValue:") g.P(" if v != nil {") diff --git a/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go b/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go index b631914dc26..1024a84e7d3 100644 --- a/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go +++ b/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go @@ -955,7 +955,7 @@ func (gen *Plugin) NewGeneratedFile(filename string, goImportPath GoImportPath) // P prints a line to the generated output. It converts each parameter to a // string following the same rules as [fmt.Print]. It never inserts spaces // between parameters. -func (g *GeneratedFile) P(v ...interface{}) { +func (g *GeneratedFile) P(v ...any) { for _, x := range v { switch x := x.(type) { case GoIdent: diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go index f47902371a6..bb2966e3b4c 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -102,7 +102,7 @@ type decoder struct { } // newError returns an error object with position info. -func (d decoder) newError(pos int, f string, x ...interface{}) error { +func (d decoder) newError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("(line %d:%d): ", line, column) return errors.New(head+f, x...) @@ -114,7 +114,7 @@ func (d decoder) unexpectedTokenError(tok json.Token) error { } // syntaxError returns a syntax error for given position. -func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { +func (d decoder) syntaxError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) return errors.New(head+f, x...) diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index a45f112bce3..24bc98ac422 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -84,7 +84,7 @@ type decoder struct { } // newError returns an error object with position info. 
-func (d decoder) newError(pos int, f string, x ...interface{}) error { +func (d decoder) newError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("(line %d:%d): ", line, column) return errors.New(head+f, x...) @@ -96,7 +96,7 @@ func (d decoder) unexpectedTokenError(tok text.Token) error { } // syntaxError returns a syntax error for given position. -func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { +func (d decoder) syntaxError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) return errors.New(head+f, x...) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go index d2b3ac031e1..ea1d3e65a57 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go @@ -214,7 +214,7 @@ func (d *Decoder) parseNext() (Token, error) { // newSyntaxError returns an error with line and column information useful for // syntax errors. -func (d *Decoder) newSyntaxError(pos int, f string, x ...interface{}) error { +func (d *Decoder) newSyntaxError(pos int, f string, x ...any) error { e := errors.New(f, x...) line, column := d.Position(pos) return errors.New("syntax error (line %d:%d): %v", line, column, e) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go index 87853e786d0..099b2bf451b 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go @@ -601,7 +601,7 @@ func (d *Decoder) consumeToken(kind Kind, size int, attrs uint8) Token { // newSyntaxError returns a syntax error with line and column information for // current position. -func (d *Decoder) newSyntaxError(f string, x ...interface{}) error { +func (d *Decoder) newSyntaxError(f string, x ...any) error { e := errors.New(f, x...) line, column := d.Position(len(d.orig) - len(d.in)) return errors.New("syntax error (line %d:%d): %v", line, column, e) diff --git a/vendor/google.golang.org/protobuf/internal/errors/errors.go b/vendor/google.golang.org/protobuf/internal/errors/errors.go index d96719829c2..c2d6bd5265d 100644 --- a/vendor/google.golang.org/protobuf/internal/errors/errors.go +++ b/vendor/google.golang.org/protobuf/internal/errors/errors.go @@ -17,7 +17,7 @@ var Error = errors.New("protobuf error") // New formats a string according to the format specifier and arguments and // returns an error that has a "proto" prefix. -func New(f string, x ...interface{}) error { +func New(f string, x ...any) error { return &prefixError{s: format(f, x...)} } @@ -43,7 +43,7 @@ func (e *prefixError) Unwrap() error { // Wrap returns an error that has a "proto" prefix, the formatted string described // by the format specifier and arguments, and a suffix of err. The error wraps err. 
-func Wrap(err error, f string, x ...interface{}) error { +func Wrap(err error, f string, x ...any) error { return &wrapError{ s: format(f, x...), err: err, @@ -67,7 +67,7 @@ func (e *wrapError) Is(target error) bool { return target == Error } -func format(f string, x ...interface{}) string { +func format(f string, x ...any) string { // avoid "proto: " prefix when chaining for i := 0; i < len(x); i++ { switch e := x[i].(type) { diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index ece53bea328..df53ff40b25 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -383,6 +383,10 @@ func (fd *Field) Message() protoreflect.MessageDescriptor { } return fd.L1.Message } +func (fd *Field) IsMapEntry() bool { + parent, ok := fd.L0.Parent.(protoreflect.MessageDescriptor) + return ok && parent.IsMapEntry() +} func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 3bc3b1cdf80..8a57d60b08c 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -534,7 +534,7 @@ func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protor } var nameBuilderPool = sync.Pool{ - New: func() interface{} { return new(strs.Builder) }, + New: func() any { return new(strs.Builder) }, } func getBuilder() *strs.Builder { diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index 570181eb487..e56c91a8dbe 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -45,6 +45,11 @@ func (file *File) resolveMessages() { case protoreflect.MessageKind, protoreflect.GroupKind: fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx) depIdx++ + if fd.L1.Kind == protoreflect.GroupKind && (fd.IsMap() || fd.IsMapEntry()) { + // A map field might inherit delimited encoding from a file-wide default feature. + // But maps never actually use delimited encoding. (At least for now...) + fd.L1.Kind = protoreflect.MessageKind + } } // Default is resolved here since it depends on Enum being resolved. diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go index f0e38c4ef4e..ba83fea44c3 100644 --- a/vendor/google.golang.org/protobuf/internal/filetype/build.go +++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go @@ -68,7 +68,7 @@ type Builder struct { // and for input and output messages referenced by service methods. // Dependencies must come after declarations, but the ordering of // dependencies themselves is unspecified. - GoTypes []interface{} + GoTypes []any // DependencyIndexes is an ordered list of indexes into GoTypes for the // dependencies of messages, extensions, or services. 
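The interface{} to any rewrites throughout these vendored protobuf files are mechanical: any has been a predeclared alias for interface{} since Go 1.18, so the rewritten signatures (GoTypes []any, the ...any variadics above, and so on) are type-identical and callers are unaffected. A small illustrative sketch of that equivalence; the describe helper is hypothetical, not from this patch:

package main

import "fmt"

// describe accepts ...any, which the compiler treats exactly like
// ...interface{}: any is an alias, not a distinct type.
func describe(vs ...any) {
	for _, v := range vs {
		fmt.Printf("%T: %v\n", v, v)
	}
}

func main() {
	var old interface{} = 42 // assignable to any and back without conversion
	var alias any = old
	describe(alias, "hello", []any{1, 2, 3})
}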
@@ -268,7 +268,7 @@ func (x depIdxs) Get(i, j int32) int32 { type ( resolverByIndex struct { - goTypes []interface{} + goTypes []any depIdxs depIdxs fileRegistry } diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index 1447a11987b..f30ab6b586f 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -860,11 +860,13 @@ const ( EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated" EnumValueOptions_Features_field_name protoreflect.Name = "features" EnumValueOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" + EnumValueOptions_FeatureSupport_field_name protoreflect.Name = "feature_support" EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated" EnumValueOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.features" EnumValueOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.debug_redact" + EnumValueOptions_FeatureSupport_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.feature_support" EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option" ) @@ -873,6 +875,7 @@ const ( EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1 EnumValueOptions_Features_field_number protoreflect.FieldNumber = 2 EnumValueOptions_DebugRedact_field_number protoreflect.FieldNumber = 3 + EnumValueOptions_FeatureSupport_field_number protoreflect.FieldNumber = 4 EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go index a371f98de14..5d5771c2ed5 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/api_export.go +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go @@ -22,13 +22,13 @@ type Export struct{} // NewError formats a string according to the format specifier and arguments and // returns an error that has a "proto" prefix. -func (Export) NewError(f string, x ...interface{}) error { +func (Export) NewError(f string, x ...any) error { return errors.New(f, x...) } // enum is any enum type generated by protoc-gen-go // and must be a named int32 type. -type enum = interface{} +type enum = any // EnumOf returns the protoreflect.Enum interface over e. // It returns nil if e is nil. @@ -81,7 +81,7 @@ func (Export) EnumStringOf(ed protoreflect.EnumDescriptor, n protoreflect.EnumNu // message is any message type generated by protoc-gen-go // and must be a pointer to a named struct type. -type message = interface{} +type message = any // legacyMessageWrapper wraps a v2 message as a v1 message. 
type legacyMessageWrapper struct{ m protoreflect.ProtoMessage } diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go index bff041edc94..f29e6a8fa88 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go +++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go @@ -68,7 +68,7 @@ func (mi *MessageInfo) isInitExtensions(ext *map[int32]ExtensionField) error { } for _, x := range *ext { ei := getExtensionFieldInfo(x.Type()) - if ei.funcs.isInit == nil { + if ei.funcs.isInit == nil || x.isUnexpandedLazy() { continue } v := x.Value() diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go index 2b8f122c27b..4bb0a7a20ce 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -99,6 +99,28 @@ func (f *ExtensionField) canLazy(xt protoreflect.ExtensionType) bool { return false } +// isUnexpandedLazy returns true if the ExensionField is lazy and not +// yet expanded, which means it's present and already checked for +// initialized required fields. +func (f *ExtensionField) isUnexpandedLazy() bool { + return f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0 +} + +// lazyBuffer retrieves the buffer for a lazy extension if it's not yet expanded. +// +// The returned buffer has to be kept over whatever operation we're planning, +// as re-retrieving it will fail after the message is lazily decoded. +func (f *ExtensionField) lazyBuffer() []byte { + // This function might be in the critical path, so check the atomic without + // taking a look first, then only take the lock if needed. + if !f.isUnexpandedLazy() { + return nil + } + f.lazy.mu.Lock() + defer f.lazy.mu.Unlock() + return f.lazy.b +} + func (f *ExtensionField) lazyInit() { f.lazy.mu.Lock() defer f.lazy.mu.Unlock() diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go index b7a23faf1e4..7a16ec13dd1 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go @@ -26,6 +26,15 @@ func sizeMessageSet(mi *MessageInfo, p pointer, opts marshalOptions) (size int) } num, _ := protowire.DecodeTag(xi.wiretag) size += messageset.SizeField(num) + if fullyLazyExtensions(opts) { + // Don't expand the extension, instead use the buffer to calculate size + if lb := x.lazyBuffer(); lb != nil { + // We got hold of the buffer, so it's still lazy. + // Don't count the tag size in the extension buffer, it's already added. + size += protowire.SizeTag(messageset.FieldMessage) + len(lb) - xi.tagsize + continue + } + } size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts) } @@ -85,6 +94,19 @@ func marshalMessageSetField(mi *MessageInfo, b []byte, x ExtensionField, opts ma xi := getExtensionFieldInfo(x.Type()) num, _ := protowire.DecodeTag(xi.wiretag) b = messageset.AppendFieldStart(b, num) + + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content. 
+ if lb := x.lazyBuffer(); lb != nil { + // The tag inside the lazy buffer is a different tag (the extension + // number), but what we need here is the tag for FieldMessage: + b = protowire.AppendVarint(b, protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType)) + b = append(b, lb[xi.tagsize:]...) + b = messageset.AppendFieldEnd(b) + return b, nil + } + } + b, err := xi.funcs.marshal(b, x.Value(), protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType), opts) if err != nil { return b, err diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go index 185ef2efa5b..e06ece55a26 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -14,7 +14,7 @@ import ( // unwrapper unwraps the value to the underlying value. // This is implemented by List and Map. type unwrapper interface { - protoUnwrap() interface{} + protoUnwrap() any } // A Converter coverts to/from Go reflect.Value types and protobuf protoreflect.Value types. diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go index f89136516f9..18cb96fd70a 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go @@ -136,6 +136,6 @@ func (ls *listReflect) NewElement() protoreflect.Value { func (ls *listReflect) IsValid() bool { return !ls.v.IsNil() } -func (ls *listReflect) protoUnwrap() interface{} { +func (ls *listReflect) protoUnwrap() any { return ls.v.Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go index f30b0a0576d..304244a651d 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go @@ -116,6 +116,6 @@ func (ms *mapReflect) NewValue() protoreflect.Value { func (ms *mapReflect) IsValid() bool { return !ms.v.IsNil() } -func (ms *mapReflect) protoUnwrap() interface{} { +func (ms *mapReflect) protoUnwrap() any { return ms.v.Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go index 845c67d6e7e..febd2122472 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/encode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -49,8 +49,11 @@ func (mi *MessageInfo) sizePointer(p pointer, opts marshalOptions) (size int) { return 0 } if opts.UseCachedSize() && mi.sizecacheOffset.IsValid() { - if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size >= 0 { - return int(size) + // The size cache contains the size + 1, to allow the + // zero value to be invalid, while also allowing for a + // 0 size to be cached. 
+ if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size > 0 { + return int(size - 1) } } return mi.sizePointerSlow(p, opts) @@ -60,7 +63,7 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int if flags.ProtoLegacy && mi.isMessageSet { size = sizeMessageSet(mi, p, opts) if mi.sizecacheOffset.IsValid() { - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1)) } return size } @@ -84,13 +87,16 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int } } if mi.sizecacheOffset.IsValid() { - if size > math.MaxInt32 { + if size > (math.MaxInt32 - 1) { // The size is too large for the int32 sizecache field. // We will need to recompute the size when encoding; // unfortunately expensive, but better than invalid output. - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), -1) + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), 0) } else { - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) + // The size cache contains the size + 1, to allow the + // zero value to be invalid, while also allowing for a + // 0 size to be cached. + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1)) } } return size @@ -149,6 +155,14 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt return b, nil } +// fullyLazyExtensions returns true if we should attempt to keep extensions lazy over size and marshal. +func fullyLazyExtensions(opts marshalOptions) bool { + // When deterministic marshaling is requested, force an unmarshal for lazy + // extensions to produce a deterministic result, instead of passing through + // bytes lazily that may or may not match what Go Protobuf would produce. + return opts.flags&piface.MarshalDeterministic == 0 +} + func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) { if ext == nil { return 0 @@ -158,6 +172,14 @@ func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marsha if xi.funcs.size == nil { continue } + if fullyLazyExtensions(opts) { + // Don't expand the extension, instead use the buffer to calculate size + if lb := x.lazyBuffer(); lb != nil { + // We got hold of the buffer, so it's still lazy. + n += len(lb) + continue + } + } n += xi.funcs.size(x.Value(), xi.tagsize, opts) } return n @@ -176,6 +198,13 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, var err error for _, x := range *ext { xi := getExtensionFieldInfo(x.Type()) + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content. + if lb := x.lazyBuffer(); lb != nil { + b = append(b, lb...) + continue + } + } b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) } return b, err @@ -191,6 +220,13 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, for _, k := range keys { x := (*ext)[int32(k)] xi := getExtensionFieldInfo(x.Type()) + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content. + if lb := x.lazyBuffer(); lb != nil { + b = append(b, lb...) 
+ continue + } + } b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) if err != nil { return b, err diff --git a/vendor/google.golang.org/protobuf/internal/impl/extension.go b/vendor/google.golang.org/protobuf/internal/impl/extension.go index cb25b0bae1d..e31249f64f7 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/extension.go @@ -53,7 +53,7 @@ type ExtensionInfo struct { // type returned by InterfaceOf may not be identical. // // Deprecated: Use InterfaceOf(xt.Zero()) instead. - ExtensionType interface{} + ExtensionType any // Field is the field number of the extension. // @@ -95,16 +95,16 @@ func (xi *ExtensionInfo) New() protoreflect.Value { func (xi *ExtensionInfo) Zero() protoreflect.Value { return xi.lazyInit().Zero() } -func (xi *ExtensionInfo) ValueOf(v interface{}) protoreflect.Value { +func (xi *ExtensionInfo) ValueOf(v any) protoreflect.Value { return xi.lazyInit().PBValueOf(reflect.ValueOf(v)) } -func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) interface{} { +func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) any { return xi.lazyInit().GoValueOf(v).Interface() } func (xi *ExtensionInfo) IsValidValue(v protoreflect.Value) bool { return xi.lazyInit().IsValidPB(v) } -func (xi *ExtensionInfo) IsValidInterface(v interface{}) bool { +func (xi *ExtensionInfo) IsValidInterface(v any) bool { return xi.lazyInit().IsValidGo(reflect.ValueOf(v)) } func (xi *ExtensionInfo) TypeDescriptor() protoreflect.ExtensionTypeDescriptor { diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go index c1c33d0057e..81b2b1a763d 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go @@ -97,7 +97,7 @@ func (e *legacyEnumWrapper) Number() protoreflect.EnumNumber { func (e *legacyEnumWrapper) ProtoReflect() protoreflect.Enum { return e } -func (e *legacyEnumWrapper) protoUnwrap() interface{} { +func (e *legacyEnumWrapper) protoUnwrap() any { v := reflect.New(e.goTyp).Elem() v.SetInt(int64(e.num)) return v.Interface() diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index 950e9a1fe7a..bf0b6049b46 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -216,7 +216,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName } for _, fn := range methods { for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { + if vs, ok := v.Interface().([]any); ok { for _, v := range vs { oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) } @@ -567,6 +567,6 @@ func (m aberrantMessage) IsValid() bool { func (m aberrantMessage) ProtoMethods() *protoiface.Methods { return aberrantProtoMethods } -func (m aberrantMessage) protoUnwrap() interface{} { +func (m aberrantMessage) protoUnwrap() any { return m.v.Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index 629bacdcedd..019399d454d 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -35,7 +35,7 @@ type MessageInfo struct { 
Exporter exporter // OneofWrappers is list of pointers to oneof wrapper struct types. - OneofWrappers []interface{} + OneofWrappers []any initMu sync.Mutex // protects all unexported fields initDone uint32 @@ -47,7 +47,7 @@ type MessageInfo struct { // exporter is a function that returns a reference to the ith field of v, // where v is a pointer to a struct. It returns nil if it does not support // exporting the requested field (e.g., already exported). -type exporter func(v interface{}, i int) interface{} +type exporter func(v any, i int) any // getMessageInfo returns the MessageInfo for any message type that // is generated by our implementation of protoc-gen-go (for v2 and on). @@ -201,7 +201,7 @@ fieldLoop: } for _, fn := range methods { for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { + if vs, ok := v.Interface().([]any); ok { oneofWrappers = vs } } @@ -256,7 +256,7 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType { type mapEntryType struct { desc protoreflect.MessageDescriptor - valType interface{} // zero value of enum or message type + valType any // zero value of enum or message type } func (mt mapEntryType) New() protoreflect.Message { diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go index a6f0dbdade6..ecb4623d701 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -20,7 +20,7 @@ type reflectMessageInfo struct { // fieldTypes contains the zero value of an enum or message field. // For lists, it contains the element type. // For maps, it contains the entry value type. - fieldTypes map[protoreflect.FieldNumber]interface{} + fieldTypes map[protoreflect.FieldNumber]any // denseFields is a subset of fields where: // 0 < fieldDesc.Number() < len(denseFields) @@ -28,7 +28,7 @@ type reflectMessageInfo struct { denseFields []*fieldInfo // rangeInfos is a list of all fields (not belonging to a oneof) and oneofs. - rangeInfos []interface{} // either *fieldInfo or *oneofInfo + rangeInfos []any // either *fieldInfo or *oneofInfo getUnknown func(pointer) protoreflect.RawFields setUnknown func(pointer, protoreflect.RawFields) @@ -224,7 +224,7 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { } if ft != nil { if mi.fieldTypes == nil { - mi.fieldTypes = make(map[protoreflect.FieldNumber]interface{}) + mi.fieldTypes = make(map[protoreflect.FieldNumber]any) } mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface() } @@ -255,6 +255,10 @@ func (m *extensionMap) Has(xd protoreflect.ExtensionTypeDescriptor) (ok bool) { if !ok { return false } + if x.isUnexpandedLazy() { + // Avoid calling x.Value(), which triggers a lazy unmarshal. + return true + } switch { case xd.IsList(): return x.Value().List().Len() > 0 @@ -389,7 +393,7 @@ var ( // MessageOf returns a reflective view over a message. The input must be a // pointer to a named Go struct. If the provided type has a ProtoReflect method, // it must be implemented by calling this method. 
-func (mi *MessageInfo) MessageOf(m interface{}) protoreflect.Message { +func (mi *MessageInfo) MessageOf(m any) protoreflect.Message { if reflect.TypeOf(m) != mi.GoReflectType { panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType)) } @@ -417,7 +421,7 @@ func (m *messageIfaceWrapper) Reset() { func (m *messageIfaceWrapper) ProtoReflect() protoreflect.Message { return (*messageReflectWrapper)(m) } -func (m *messageIfaceWrapper) protoUnwrap() interface{} { +func (m *messageIfaceWrapper) protoUnwrap() any { return m.p.AsIfaceOf(m.mi.GoReflectType.Elem()) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go index 29ba6bd3552..99dc23c6f0a 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go @@ -23,7 +23,7 @@ func (m *messageState) New() protoreflect.Message { func (m *messageState) Interface() protoreflect.ProtoMessage { return m.protoUnwrap().(protoreflect.ProtoMessage) } -func (m *messageState) protoUnwrap() interface{} { +func (m *messageState) protoUnwrap() any { return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) } func (m *messageState) ProtoMethods() *protoiface.Methods { @@ -154,7 +154,7 @@ func (m *messageReflectWrapper) Interface() protoreflect.ProtoMessage { } return (*messageIfaceWrapper)(m) } -func (m *messageReflectWrapper) protoUnwrap() interface{} { +func (m *messageReflectWrapper) protoUnwrap() any { return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) } func (m *messageReflectWrapper) ProtoMethods() *protoiface.Methods { diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go index 517e94434c7..da685e8a29d 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -16,7 +16,7 @@ import ( const UnsafeEnabled = false // Pointer is an opaque pointer type. -type Pointer interface{} +type Pointer any // offset represents the offset to a struct field, accessible from a pointer. // The offset is the field index into a struct. @@ -62,7 +62,7 @@ func pointerOfValue(v reflect.Value) pointer { } // pointerOfIface returns the pointer portion of an interface. -func pointerOfIface(v interface{}) pointer { +func pointerOfIface(v any) pointer { return pointer{v: reflect.ValueOf(v)} } @@ -93,7 +93,7 @@ func (p pointer) AsValueOf(t reflect.Type) reflect.Value { // AsIfaceOf treats p as a pointer to an object of type t and returns the value. // It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) interface{} { +func (p pointer) AsIfaceOf(t reflect.Type) any { return p.AsValueOf(t).Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 4b020e31164..5f20ca5d8ab 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -50,7 +50,7 @@ func pointerOfValue(v reflect.Value) pointer { } // pointerOfIface returns the pointer portion of an interface. 
-func pointerOfIface(v interface{}) pointer { +func pointerOfIface(v any) pointer { type ifaceHeader struct { Type unsafe.Pointer Data unsafe.Pointer @@ -80,7 +80,7 @@ func (p pointer) AsValueOf(t reflect.Type) reflect.Value { // AsIfaceOf treats p as a pointer to an object of type t and returns the value. // It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) interface{} { +func (p pointer) AsIfaceOf(t reflect.Type) any { // TODO: Use tricky unsafe magic to directly create ifaceHeader. return p.AsValueOf(t).Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/msgfmt/format.go b/vendor/google.golang.org/protobuf/internal/msgfmt/format.go index a319550f69e..17b3f27b8a2 100644 --- a/vendor/google.golang.org/protobuf/internal/msgfmt/format.go +++ b/vendor/google.golang.org/protobuf/internal/msgfmt/format.go @@ -86,7 +86,7 @@ func appendMessage(b []byte, m protoreflect.Message) []byte { return b } -var protocmpMessageType = reflect.TypeOf(map[string]interface{}(nil)) +var protocmpMessageType = reflect.TypeOf(map[string]any(nil)) func appendKnownMessage(b []byte, m protoreflect.Message) []byte { md := m.Descriptor() @@ -98,7 +98,7 @@ func appendKnownMessage(b []byte, m protoreflect.Message) []byte { if v := reflect.ValueOf(m); v.Type().ConvertibleTo(protocmpMessageType) { // For protocmp.Message, directly obtain the sub-message value // which is stored in structured form, rather than as raw bytes. - m2 := v.Convert(protocmpMessageType).Interface().(map[string]interface{}) + m2 := v.Convert(protocmpMessageType).Interface().(map[string]any) v, ok := m2[string(genid.Any_Value_field_name)].(proto.Message) if !ok { return nil diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go index 1665a68e5b7..a1f09162d05 100644 --- a/vendor/google.golang.org/protobuf/internal/order/range.go +++ b/vendor/google.golang.org/protobuf/internal/order/range.go @@ -18,7 +18,7 @@ type messageField struct { } var messageFieldPool = sync.Pool{ - New: func() interface{} { return new([]messageField) }, + New: func() any { return new([]messageField) }, } type ( @@ -69,7 +69,7 @@ type mapEntry struct { } var mapEntryPool = sync.Pool{ - New: func() interface{} { return new([]mapEntry) }, + New: func() any { return new([]mapEntry) }, } type ( diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index a3cba508022..dbbf1f6862c 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( const ( Major = 1 Minor = 34 - Patch = 1 + Patch = 2 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go index c9c8721a697..d248f292846 100644 --- a/vendor/google.golang.org/protobuf/proto/extension.go +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -39,7 +39,7 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) { // If the field is unpopulated, it returns the default value for // scalars and an immutable, empty value for lists or messages. // It panics if xt does not extend m. -func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} { +func GetExtension(m Message, xt protoreflect.ExtensionType) any { // Treat nil message interface as an empty message; return the default. 
if m == nil { return xt.InterfaceOf(xt.Zero()) @@ -51,7 +51,7 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} { // SetExtension stores the value of an extension field. // It panics if m is invalid, xt does not extend m, or if type of v // is invalid for the specified extension field. -func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) { +func SetExtension(m Message, xt protoreflect.ExtensionType, v any) { xd := xt.TypeDescriptor() pv := xt.ValueOf(v) @@ -78,7 +78,7 @@ func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) { // It returns immediately if f returns false. // While iterating, mutating operations may only be performed // on the current extension field. -func RangeExtensions(m Message, f func(protoreflect.ExtensionType, interface{}) bool) { +func RangeExtensions(m Message, f func(protoreflect.ExtensionType, any) bool) { // Treat nil message interface as an empty message; nothing to range over. if m == nil { return diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go index 254ca585424..f3cebab29c8 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -46,6 +46,11 @@ func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*desc if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil { return errors.New("message field %q cannot resolve type: %v", f.FullName(), err) } + if f.L1.Kind == protoreflect.GroupKind && (f.IsMap() || f.IsMapEntry()) { + // A map field might inherit delimited encoding from a file-wide default feature. + // But maps never actually use delimited encoding. (At least for now...) + f.L1.Kind = protoreflect.MessageKind + } if fd.DefaultValue != nil { v, ev, err := unmarshalDefault(fd.GetDefaultValue(), f, r.allowUnresolvable) if err != nil { diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go index c6293086750..6de31c2ebdb 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -116,18 +116,6 @@ func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds if m.ExtensionRanges().Len() > 0 { return errors.New("message %q using proto3 semantics cannot have extension ranges", m.FullName()) } - // Verify that field names in proto3 do not conflict if lowercased - // with all underscores removed. 
- // See protoc v3.8.0: src/google/protobuf/descriptor.cc:5830-5847 - names := map[string]protoreflect.FieldDescriptor{} - for i := 0; i < m.Fields().Len(); i++ { - f1 := m.Fields().Get(i) - s := strings.Replace(strings.ToLower(string(f1.Name())), "_", "", -1) - if f2, ok := names[s]; ok { - return errors.New("message %q using proto3 semantics has conflict: %q with %q", m.FullName(), f1.Name(), f2.Name()) - } - names[s] = f1 - } } for j, fd := range md.GetField() { diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index 00102d31178..ea154eec44d 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -485,6 +485,8 @@ func (p *SourcePath) appendEnumValueOptions(b []byte) []byte { b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 3: b = p.appendSingularField(b, "debug_redact", nil) + case 4: + b = p.appendSingularField(b, "feature_support", (*SourcePath).appendFieldOptions_FeatureSupport) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index 5b80afe5204..cd8fadbaf8f 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -510,7 +510,7 @@ type ExtensionType interface { // // ValueOf is more extensive than protoreflect.ValueOf for a given field's // value as it has more type information available. - ValueOf(interface{}) Value + ValueOf(any) Value // InterfaceOf completely unwraps the Value to the underlying Go type. // InterfaceOf panics if the input is nil or does not represent the @@ -519,13 +519,13 @@ type ExtensionType interface { // // InterfaceOf is able to unwrap the Value further than Value.Interface // as it has more type information available. - InterfaceOf(Value) interface{} + InterfaceOf(Value) any // IsValidValue reports whether the Value is valid to assign to the field. IsValidValue(Value) bool // IsValidInterface reports whether the input is valid to assign to the field. 
- IsValidInterface(interface{}) bool + IsValidInterface(any) bool } // EnumDescriptor describes an enum and diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go index 7ced876f4e8..75f83a2af03 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go @@ -32,11 +32,11 @@ const ( type value struct { pragma.DoNotCompare // 0B - typ valueType // 8B - num uint64 // 8B - str string // 16B - bin []byte // 24B - iface interface{} // 16B + typ valueType // 8B + num uint64 // 8B + str string // 16B + bin []byte // 24B + iface any // 16B } func valueOfString(v string) Value { @@ -45,7 +45,7 @@ func valueOfString(v string) Value { func valueOfBytes(v []byte) Value { return Value{typ: bytesType, bin: v} } -func valueOfIface(v interface{}) Value { +func valueOfIface(v any) Value { return Value{typ: ifaceType, iface: v} } @@ -55,6 +55,6 @@ func (v Value) getString() string { func (v Value) getBytes() []byte { return v.bin } -func (v Value) getIface() interface{} { +func (v Value) getIface() any { return v.iface } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go index 1603097311e..9fe83cef5a9 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -69,8 +69,8 @@ import ( // composite Value. Modifying an empty, read-only value panics. type Value value -// The protoreflect API uses a custom Value union type instead of interface{} -// to keep the future open for performance optimizations. Using an interface{} +// The protoreflect API uses a custom Value union type instead of any +// to keep the future open for performance optimizations. Using an any // always incurs an allocation for primitives (e.g., int64) since it needs to // be boxed on the heap (as interfaces can only contain pointers natively). // Instead, we represent the Value union as a flat struct that internally keeps @@ -85,7 +85,7 @@ type Value value // ValueOf returns a Value initialized with the concrete value stored in v. // This panics if the type does not match one of the allowed types in the // Value union. -func ValueOf(v interface{}) Value { +func ValueOf(v any) Value { switch v := v.(type) { case nil: return Value{} @@ -192,10 +192,10 @@ func (v Value) IsValid() bool { return v.typ != nilType } -// Interface returns v as an interface{}. +// Interface returns v as an any. // // Invariant: v == ValueOf(v).Interface() -func (v Value) Interface() interface{} { +func (v Value) Interface() any { switch v.typ { case nilType: return nil @@ -406,8 +406,8 @@ func (k MapKey) IsValid() bool { return Value(k).IsValid() } -// Interface returns k as an interface{}. -func (k MapKey) Interface() interface{} { +// Interface returns k as an any. +func (k MapKey) Interface() any { return Value(k).Interface() } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go index b1fdbe3e8e1..7f3583ead81 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go @@ -45,7 +45,7 @@ var ( // typeOf returns a pointer to the Go type information. 
// The pointer is comparable and equal if and only if the types are identical. -func typeOf(t interface{}) unsafe.Pointer { +func typeOf(t any) unsafe.Pointer { return (*ifaceHeader)(unsafe.Pointer(&t)).Type } @@ -80,7 +80,7 @@ func valueOfBytes(v []byte) Value { p := (*sliceHeader)(unsafe.Pointer(&v)) return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))} } -func valueOfIface(v interface{}) Value { +func valueOfIface(v any) Value { p := (*ifaceHeader)(unsafe.Pointer(&v)) return Value{typ: p.Type, ptr: p.Data} } @@ -93,7 +93,7 @@ func (v Value) getBytes() (x []byte) { *(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)} return x } -func (v Value) getIface() (x interface{}) { +func (v Value) getIface() (x any) { *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} return x } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go index 43547011173..f7d386990a0 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go @@ -15,7 +15,7 @@ import ( type ( ifaceHeader struct { - _ [0]interface{} // if interfaces have greater alignment than unsafe.Pointer, this will enforce it. + _ [0]any // if interfaces have greater alignment than unsafe.Pointer, this will enforce it. Type unsafe.Pointer Data unsafe.Pointer } @@ -37,7 +37,7 @@ var ( // typeOf returns a pointer to the Go type information. // The pointer is comparable and equal if and only if the types are identical. -func typeOf(t interface{}) unsafe.Pointer { +func typeOf(t any) unsafe.Pointer { return (*ifaceHeader)(unsafe.Pointer(&t)).Type } @@ -70,7 +70,7 @@ func valueOfString(v string) Value { func valueOfBytes(v []byte) Value { return Value{typ: bytesType, ptr: unsafe.Pointer(unsafe.SliceData(v)), num: uint64(len(v))} } -func valueOfIface(v interface{}) Value { +func valueOfIface(v any) Value { p := (*ifaceHeader)(unsafe.Pointer(&v)) return Value{typ: p.Type, ptr: p.Data} } @@ -81,7 +81,7 @@ func (v Value) getString() string { func (v Value) getBytes() []byte { return unsafe.Slice((*byte)(v.ptr), v.num) } -func (v Value) getIface() (x interface{}) { +func (v Value) getIface() (x any) { *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} return x } diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index 6267dc52a67..de177733914 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -95,7 +95,7 @@ type Files struct { // multiple files. Only top-level declarations are registered. // Note that enum values are in the top-level since that are in the same // scope as the parent enum. 
- descsByName map[protoreflect.FullName]interface{} + descsByName map[protoreflect.FullName]any filesByPath map[string][]protoreflect.FileDescriptor numFiles int } @@ -117,7 +117,7 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error { defer globalMutex.Unlock() } if r.descsByName == nil { - r.descsByName = map[protoreflect.FullName]interface{}{ + r.descsByName = map[protoreflect.FullName]any{ "": &packageDescriptor{}, } r.filesByPath = make(map[string][]protoreflect.FileDescriptor) @@ -485,7 +485,7 @@ type Types struct { } type ( - typesByName map[protoreflect.FullName]interface{} + typesByName map[protoreflect.FullName]any extensionsByMessage map[protoreflect.FullName]extensionsByNumber extensionsByNumber map[protoreflect.FieldNumber]protoreflect.ExtensionType ) @@ -570,7 +570,7 @@ func (r *Types) RegisterExtension(xt protoreflect.ExtensionType) error { return nil } -func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interface{}) error { +func (r *Types) register(kind string, desc protoreflect.Descriptor, typ any) error { name := desc.FullName() prev := r.typesByName[name] if prev != nil { @@ -841,7 +841,7 @@ func (r *Types) RangeExtensionsByMessage(message protoreflect.FullName, f func(p } } -func typeName(t interface{}) string { +func typeName(t any) string { switch t.(type) { case protoreflect.EnumType: return "enum" @@ -854,7 +854,7 @@ func typeName(t interface{}) string { } } -func amendErrorWithCaller(err error, prev, curr interface{}) error { +func amendErrorWithCaller(err error, prev, curr any) error { prevPkg := goPackage(prev) currPkg := goPackage(curr) if prevPkg == "" || currPkg == "" || prevPkg == currPkg { @@ -863,7 +863,7 @@ func amendErrorWithCaller(err error, prev, curr interface{}) error { return errors.New("%s\n\tpreviously from: %q\n\tcurrently from: %q", err, prevPkg, currPkg) } -func goPackage(v interface{}) string { +func goPackage(v any) string { switch d := v.(type) { case protoreflect.EnumType: v = d.Descriptor() diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 10c9030eb03..9403eb07507 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -3012,6 +3012,8 @@ type EnumValueOptions struct { // out when using debug formats, e.g. when the field contains sensitive // credentials. DebugRedact *bool `protobuf:"varint,3,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` + // Information about the support window of a feature value. + FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,4,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -3075,6 +3077,13 @@ func (x *EnumValueOptions) GetDebugRedact() bool { return Default_EnumValueOptions_DebugRedact } +func (x *EnumValueOptions) GetFeatureSupport() *FieldOptions_FeatureSupport { + if x != nil { + return x.FeatureSupport + } + return nil +} + func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -4706,7 +4715,7 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x69, 0x6e, 0x67, 0x22, 0x97, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x69, 0x6e, 0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, @@ -4779,438 +4788,445 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, - 0x02, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xf4, 0x03, - 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, - 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, - 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, - 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, - 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, - 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, - 0x42, 0x02, 
0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, - 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, - 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, - 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, - 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, - 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, - 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, - 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, - 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, - 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, - 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, - 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, - 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, - 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, - 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, - 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 
0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, - 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, - 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, - 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, - 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, - 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, - 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x55, 0x0a, 0x0f, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x16, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, - 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, - 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, - 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, 
0x0e, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x72, 0x6f, 0x64, 0x75, 0x63, - 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77, 0x61, 0x72, 0x6e, 0x69, - 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x0f, - 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x22, - 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, - 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, - 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, - 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, - 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, - 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, - 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, - 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, - 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, - 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, - 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, - 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, - 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, - 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 
0x54, 0x59, - 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, - 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, - 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, - 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, - 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, - 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, - 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, - 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, - 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, - 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, - 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, - 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, - 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, - 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, - 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, - 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x65, 0x74, 
0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, + 0x02, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70, + 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, + 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, + 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, + 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, + 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, + 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 
0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, - 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x81, 0x02, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, - 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, + 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, + 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, + 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, + 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, + 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, + 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, + 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, - 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 
0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, + 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, + 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, + 0x55, 0x0a, 0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, + 0x72, 0x74, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, - 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, - 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 
- 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, + 0x0e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, + 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, + 0x64, 0x75, 0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, + 0x74, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, + 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, + 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, + 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, + 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, + 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, + 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, + 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, + 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, + 0x54, 0x49, 0x4f, 0x4e, 
0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, + 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, + 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, + 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, + 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, + 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, + 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, + 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, + 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, + 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, + 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, + 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, - 0x80, 0x80, 0x02, 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, - 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, - 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, - 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, - 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 
0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, - 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, - 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, - 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, - 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, - 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, - 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, - 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, - 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, - 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, - 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, - 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, - 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 
0x20, 0x01, 0x28, 0x09, - 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, - 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, - 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, - 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, - 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x0a, 0x0a, - 0x0a, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, - 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, - 0x3f, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, - 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, - 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, - 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, - 0x52, 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, - 0x6c, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, - 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, - 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, - 0x07, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, - 0x08, 0xe8, 0x07, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, - 0x0a, 0x17, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, - 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, - 0x6e, 0x67, 0x42, 0x2d, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, - 0x12, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0b, - 0x12, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, - 0x07, 0x52, 0x15, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, - 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, - 
0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, - 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, - 0x45, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, - 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, - 0x26, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, - 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, - 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, - 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, - 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, - 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, - 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, - 0x0a, 0x12, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, - 0x07, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, - 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, - 0x0a, 0x16, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, - 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, - 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, - 0x49, 0x43, 0x49, 0x54, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, - 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, - 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, - 0x0a, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, - 0x45, 0x44, 0x10, 0x02, 0x22, 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, - 0x1f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, - 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, - 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, - 0x0a, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x43, 0x0a, 0x0e, - 0x55, 0x74, 0x66, 0x38, 
0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, - 0x0a, 0x17, 0x55, 0x54, 0x46, 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, - 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, - 0x03, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, - 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x5f, - 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, - 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, - 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, 0x4d, - 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, - 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, 0x52, - 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, - 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, 0x41, - 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, 0x02, - 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0xe9, 0x07, 0x2a, 0x06, 0x08, 0xe9, 0x07, 0x10, 0xea, 0x07, - 0x2a, 0x06, 0x08, 0xea, 0x07, 0x10, 0xeb, 0x07, 0x2a, 0x06, 0x08, 0x86, 0x4e, 0x10, 0x87, 0x4e, - 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, - 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, 0x07, 0x22, 0xd9, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, - 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, - 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, - 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, - 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, - 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, - 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, - 0xe2, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, - 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 
0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, + 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, + 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, + 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, + 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, + 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, + 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 
0x72, 0x65, 0x64, 0x61, + 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, + 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, + 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, + 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x4e, 0x0a, 0x14, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, - 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, + 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, + 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, + 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, + 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, + 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, - 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x12, 0x42, 0x0a, 0x0e, 0x66, 0x69, 0x78, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x0d, 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, - 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, - 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, - 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, - 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, - 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, - 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, - 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, - 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, - 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, - 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, - 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x65, 0x64, 
0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, - 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, - 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, - 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, - 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, - 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, - 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, - 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, - 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, - 0x02, 0x2a, 0xa7, 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, - 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, - 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, - 0x47, 0x41, 0x43, 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, 0x0e, - 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, 0xe7, - 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, - 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, - 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, - 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, - 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, - 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, - 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, 0x63, - 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, - 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, + 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, + 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, + 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, + 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, + 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, + 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, + 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, + 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, + 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, + 0x65, 
0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, + 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, + 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, + 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, + 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, + 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, + 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, + 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, + 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, + 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, + 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, + 0x6d, 0x54, 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, + 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, + 0x09, 0x12, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, + 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, + 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, + 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, + 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, + 0x2d, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, + 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, + 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, + 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 
0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, + 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0xe6, + 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, + 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, + 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, + 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xb2, 0x01, + 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, + 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, + 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, + 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, + 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, + 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, + 0x43, 0x49, 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, + 0x54, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, + 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, + 0x50, 0x45, 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, + 0x02, 0x22, 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, + 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 
0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, + 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x0a, 0x0a, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, + 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, + 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, + 0x54, 0x46, 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, + 0x46, 0x59, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, + 0x08, 0x01, 0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, + 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, + 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, + 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, + 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, + 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, + 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, + 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, + 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, + 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, + 0x10, 0xe8, 0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, + 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, + 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, + 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, + 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 
0x69, 0x6f, 0x6e, + 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, + 0x62, 0x6c, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66, + 0x69, 0x78, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, + 0x52, 0x0d, 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, + 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, + 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, + 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, + 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, + 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, + 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, + 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, + 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, + 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, + 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, + 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, + 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, + 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, + 0x10, 0x02, 0x2a, 0xa7, 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, + 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, + 0x45, 0x47, 0x41, 0x43, 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, + 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, + 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, + 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, + 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, + 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, + 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, + 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, + 0x4e, 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 
0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, + 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, + 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, } var ( @@ -5227,7 +5243,7 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17) var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33) -var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ +var file_google_protobuf_descriptor_proto_goTypes = []any{ (Edition)(0), // 0: google.protobuf.Edition (ExtensionRangeOptions_VerificationState)(0), // 1: google.protobuf.ExtensionRangeOptions.VerificationState (FieldDescriptorProto_Type)(0), // 2: google.protobuf.FieldDescriptorProto.Type @@ -5329,38 +5345,39 @@ var file_google_protobuf_descriptor_proto_depIdxs = []int32{ 36, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet 35, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption 36, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 49: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 50: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 51: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 9, // 52: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 36, // 53: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 54: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 46, // 55: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 10, // 56: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence - 11, // 57: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType - 12, // 58: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding - 13, // 59: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation - 14, // 60: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding - 15, // 61: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat - 47, // 62: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - 0, // 63: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition - 0, // 64: google.protobuf.FeatureSetDefaults.maximum_edition:type_name 
-> google.protobuf.Edition - 48, // 65: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 49, // 66: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 20, // 67: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 0, // 68: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition - 0, // 69: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition - 0, // 70: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition - 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition - 0, // 72: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition - 36, // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet - 36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet - 16, // 75: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic - 76, // [76:76] is the sub-list for method output_type - 76, // [76:76] is the sub-list for method input_type - 76, // [76:76] is the sub-list for extension type_name - 76, // [76:76] is the sub-list for extension extendee - 0, // [0:76] is the sub-list for field type_name + 45, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 35, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 9, // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 36, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 46, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence + 11, // 58: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType + 12, // 59: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding + 13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation + 14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding + 15, // 62: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat + 47, // 63: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + 0, // 64: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition + 0, // 65: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition + 48, // 66: google.protobuf.SourceCodeInfo.location:type_name -> 
google.protobuf.SourceCodeInfo.Location + 49, // 67: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 20, // 68: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 0, // 69: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition + 0, // 70: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition + 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition + 0, // 72: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition + 0, // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition + 36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet + 36, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet + 16, // 76: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 77, // [77:77] is the sub-list for method output_type + 77, // [77:77] is the sub-list for method input_type + 77, // [77:77] is the sub-list for extension type_name + 77, // [77:77] is the sub-list for extension extendee + 0, // [0:77] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -5369,7 +5386,7 @@ func file_google_protobuf_descriptor_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*FileDescriptorSet); i { case 0: return &v.state @@ -5381,7 +5398,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*FileDescriptorProto); i { case 0: return &v.state @@ -5393,7 +5410,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*DescriptorProto); i { case 0: return &v.state @@ -5405,7 +5422,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*ExtensionRangeOptions); i { case 0: return &v.state @@ -5419,7 +5436,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*FieldDescriptorProto); i { case 0: return &v.state @@ -5431,7 +5448,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + 
file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*OneofDescriptorProto); i { case 0: return &v.state @@ -5443,7 +5460,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*EnumDescriptorProto); i { case 0: return &v.state @@ -5455,7 +5472,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*EnumValueDescriptorProto); i { case 0: return &v.state @@ -5467,7 +5484,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*ServiceDescriptorProto); i { case 0: return &v.state @@ -5479,7 +5496,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*MethodDescriptorProto); i { case 0: return &v.state @@ -5491,7 +5508,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*FileOptions); i { case 0: return &v.state @@ -5505,7 +5522,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*MessageOptions); i { case 0: return &v.state @@ -5519,7 +5536,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*FieldOptions); i { case 0: return &v.state @@ -5533,7 +5550,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*OneofOptions); i { case 0: return &v.state @@ -5547,7 +5564,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v any, i int) any { switch v := v.(*EnumOptions); i { case 0: return &v.state @@ -5561,7 +5578,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v 
:= v.(*EnumValueOptions); i { case 0: return &v.state @@ -5575,7 +5592,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v any, i int) any { switch v := v.(*ServiceOptions); i { case 0: return &v.state @@ -5589,7 +5606,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v any, i int) any { switch v := v.(*MethodOptions); i { case 0: return &v.state @@ -5603,7 +5620,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v any, i int) any { switch v := v.(*UninterpretedOption); i { case 0: return &v.state @@ -5615,7 +5632,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v any, i int) any { switch v := v.(*FeatureSet); i { case 0: return &v.state @@ -5629,7 +5646,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v any, i int) any { switch v := v.(*FeatureSetDefaults); i { case 0: return &v.state @@ -5641,7 +5658,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v any, i int) any { switch v := v.(*SourceCodeInfo); i { case 0: return &v.state @@ -5653,7 +5670,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v any, i int) any { switch v := v.(*GeneratedCodeInfo); i { case 0: return &v.state @@ -5665,7 +5682,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v any, i int) any { switch v := v.(*DescriptorProto_ExtensionRange); i { case 0: return &v.state @@ -5677,7 +5694,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v any, i int) any { switch v := v.(*DescriptorProto_ReservedRange); i { case 0: return &v.state @@ -5689,7 +5706,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v any, i int) any { switch v := v.(*ExtensionRangeOptions_Declaration); i { case 0: return &v.state @@ 
-5701,7 +5718,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v any, i int) any { switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { case 0: return &v.state @@ -5713,7 +5730,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v any, i int) any { switch v := v.(*FieldOptions_EditionDefault); i { case 0: return &v.state @@ -5725,7 +5742,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v any, i int) any { switch v := v.(*FieldOptions_FeatureSupport); i { case 0: return &v.state @@ -5737,7 +5754,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v any, i int) any { switch v := v.(*UninterpretedOption_NamePart); i { case 0: return &v.state @@ -5749,7 +5766,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v any, i int) any { switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i { case 0: return &v.state @@ -5761,7 +5778,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v any, i int) any { switch v := v.(*SourceCodeInfo_Location); i { case 0: return &v.state @@ -5773,7 +5790,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v any, i int) any { switch v := v.(*GeneratedCodeInfo_Annotation); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go b/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go index 39b024b46b8..1ba1dfa5ad8 100644 --- a/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go +++ b/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go @@ -294,7 +294,7 @@ func (m *Message) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { case fd.IsMap(): return protoreflect.ValueOfMap(&dynamicMap{ desc: fd, - mapv: make(map[interface{}]protoreflect.Value), + mapv: make(map[any]protoreflect.Value), }) case fd.IsList(): return protoreflect.ValueOfList(&dynamicList{desc: fd}) @@ -450,7 +450,7 @@ func (x *dynamicList) IsValid() bool { type dynamicMap struct { desc protoreflect.FieldDescriptor - mapv map[interface{}]protoreflect.Value + mapv map[any]protoreflect.Value } func (x *dynamicMap) Get(k protoreflect.MapKey) protoreflect.Value { return x.mapv[k.Interface()] } @@ -634,11 +634,11 @@ func newListEntry(fd protoreflect.FieldDescriptor) 
protoreflect.Value { // // The InterfaceOf and ValueOf methods of the extension type are defined as: // -// func (xt extensionType) ValueOf(iv interface{}) protoreflect.Value { +// func (xt extensionType) ValueOf(iv any) protoreflect.Value { // return protoreflect.ValueOf(iv) // } // -// func (xt extensionType) InterfaceOf(v protoreflect.Value) interface{} { +// func (xt extensionType) InterfaceOf(v protoreflect.Value) any { // return v.Interface() // } // @@ -658,7 +658,7 @@ func (xt extensionType) New() protoreflect.Value { case xt.desc.IsMap(): return protoreflect.ValueOfMap(&dynamicMap{ desc: xt.desc, - mapv: make(map[interface{}]protoreflect.Value), + mapv: make(map[any]protoreflect.Value), }) case xt.desc.IsList(): return protoreflect.ValueOfList(&dynamicList{desc: xt.desc}) @@ -686,18 +686,18 @@ func (xt extensionType) TypeDescriptor() protoreflect.ExtensionTypeDescriptor { return xt.desc } -func (xt extensionType) ValueOf(iv interface{}) protoreflect.Value { +func (xt extensionType) ValueOf(iv any) protoreflect.Value { v := protoreflect.ValueOf(iv) typecheck(xt.desc, v) return v } -func (xt extensionType) InterfaceOf(v protoreflect.Value) interface{} { +func (xt extensionType) InterfaceOf(v protoreflect.Value) any { typecheck(xt.desc, v) return v.Interface() } -func (xt extensionType) IsValidInterface(iv interface{}) bool { +func (xt extensionType) IsValidInterface(iv any) bool { return typeIsValid(xt.desc, protoreflect.ValueOf(iv)) == nil } diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go index b0df3fb3340..a2ca940c50f 100644 --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -90,27 +90,27 @@ var file_google_protobuf_go_features_proto_rawDesc = []byte{ 0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc9, 0x01, 0x0a, 0x0a, 0x47, 0x6f, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xba, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67, + 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcd, 0x01, 0x0a, 0x0a, 0x47, 0x6f, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73, - 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x7d, 0x88, - 0x01, 0x01, 0x98, 0x01, 0x06, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, 0x75, 0x65, 0x18, 0x84, - 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, 0xe7, 0x07, 0xb2, 0x01, - 0x5b, 0x08, 0xe8, 0x07, 0x10, 0xe8, 0x07, 0x1a, 0x53, 0x54, 0x68, 0x65, 0x20, 0x6c, 0x65, 0x67, - 0x61, 0x63, 0x79, 0x20, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x4a, 0x53, 0x4f, - 0x4e, 0x20, 0x41, 0x50, 0x49, 0x20, 0x69, 0x73, 0x20, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x62, 0x65, 0x20, - 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61, 0x20, 0x66, 0x75, 0x74, - 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x17, 
0x6c, 0x65, - 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x4a, 0x73, 0x6f, - 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, 0x1b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, - 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, - 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x70, 0x62, + 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x80, 0x01, + 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, + 0x75, 0x65, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, + 0xe7, 0x07, 0xb2, 0x01, 0x5b, 0x08, 0xe8, 0x07, 0x10, 0xe8, 0x07, 0x1a, 0x53, 0x54, 0x68, 0x65, + 0x20, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x20, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, + 0x6c, 0x4a, 0x53, 0x4f, 0x4e, 0x20, 0x41, 0x50, 0x49, 0x20, 0x69, 0x73, 0x20, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c, + 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61, + 0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, + 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, + 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x70, 0x62, } var ( @@ -126,7 +126,7 @@ func file_google_protobuf_go_features_proto_rawDescGZIP() []byte { } var file_google_protobuf_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_go_features_proto_goTypes = []interface{}{ +var file_google_protobuf_go_features_proto_goTypes = []any{ (*GoFeatures)(nil), // 0: pb.GoFeatures (*descriptorpb.FeatureSet)(nil), // 1: google.protobuf.FeatureSet } @@ -146,7 +146,7 @@ func file_google_protobuf_go_features_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*GoFeatures); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 9de51be5403..7172b43d383 100644 --- 
a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -445,7 +445,7 @@ func file_google_protobuf_any_proto_rawDescGZIP() []byte { } var file_google_protobuf_any_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_any_proto_goTypes = []interface{}{ +var file_google_protobuf_any_proto_goTypes = []any{ (*Any)(nil), // 0: google.protobuf.Any } var file_google_protobuf_any_proto_depIdxs = []int32{ @@ -462,7 +462,7 @@ func file_google_protobuf_any_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Any); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index df709a8dd4c..1b71bcd910a 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -323,7 +323,7 @@ func file_google_protobuf_duration_proto_rawDescGZIP() []byte { } var file_google_protobuf_duration_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_duration_proto_goTypes = []interface{}{ +var file_google_protobuf_duration_proto_goTypes = []any{ (*Duration)(nil), // 0: google.protobuf.Duration } var file_google_protobuf_duration_proto_depIdxs = []int32{ @@ -340,7 +340,7 @@ func file_google_protobuf_duration_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Duration); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go index 9a7277ba394..d87b4fb8281 100644 --- a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go @@ -115,7 +115,7 @@ func file_google_protobuf_empty_proto_rawDescGZIP() []byte { } var file_google_protobuf_empty_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_empty_proto_goTypes = []interface{}{ +var file_google_protobuf_empty_proto_goTypes = []any{ (*Empty)(nil), // 0: google.protobuf.Empty } var file_google_protobuf_empty_proto_depIdxs = []int32{ @@ -132,7 +132,7 @@ func file_google_protobuf_empty_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Empty); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go index e8789cb331e..ac1e91bb6dd 100644 --- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go @@ -537,7 +537,7 @@ func file_google_protobuf_field_mask_proto_rawDescGZIP() []byte { } var file_google_protobuf_field_mask_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var 
file_google_protobuf_field_mask_proto_goTypes = []interface{}{ +var file_google_protobuf_field_mask_proto_goTypes = []any{ (*FieldMask)(nil), // 0: google.protobuf.FieldMask } var file_google_protobuf_field_mask_proto_depIdxs = []int32{ @@ -554,7 +554,7 @@ func file_google_protobuf_field_mask_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*FieldMask); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go index d2bac8b88ea..d45361cbc72 100644 --- a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go @@ -49,11 +49,11 @@ // The standard Go "encoding/json" package has functionality to serialize // arbitrary types to a large degree. The Value.AsInterface, Struct.AsMap, and // ListValue.AsSlice methods can convert the protobuf message representation into -// a form represented by interface{}, map[string]interface{}, and []interface{}. +// a form represented by any, map[string]any, and []any. // This form can be used with other packages that operate on such data structures // and also directly with the standard json package. // -// In order to convert the interface{}, map[string]interface{}, and []interface{} +// In order to convert the any, map[string]any, and []any // forms back as Value, Struct, and ListValue messages, use the NewStruct, // NewList, and NewValue constructor functions. // @@ -88,28 +88,28 @@ // // To construct a Value message representing the above JSON object: // -// m, err := structpb.NewValue(map[string]interface{}{ +// m, err := structpb.NewValue(map[string]any{ // "firstName": "John", // "lastName": "Smith", // "isAlive": true, // "age": 27, -// "address": map[string]interface{}{ +// "address": map[string]any{ // "streetAddress": "21 2nd Street", // "city": "New York", // "state": "NY", // "postalCode": "10021-3100", // }, -// "phoneNumbers": []interface{}{ -// map[string]interface{}{ +// "phoneNumbers": []any{ +// map[string]any{ // "type": "home", // "number": "212 555-1234", // }, -// map[string]interface{}{ +// map[string]any{ // "type": "office", // "number": "646 555-4567", // }, // }, -// "children": []interface{}{}, +// "children": []any{}, // "spouse": nil, // }) // if err != nil { @@ -197,7 +197,7 @@ type Struct struct { // NewStruct constructs a Struct from a general-purpose Go map. // The map keys must be valid UTF-8. // The map values are converted using NewValue. -func NewStruct(v map[string]interface{}) (*Struct, error) { +func NewStruct(v map[string]any) (*Struct, error) { x := &Struct{Fields: make(map[string]*Value, len(v))} for k, v := range v { if !utf8.ValidString(k) { @@ -214,9 +214,9 @@ func NewStruct(v map[string]interface{}) (*Struct, error) { // AsMap converts x to a general-purpose Go map. // The map values are converted by calling Value.AsInterface. 
-func (x *Struct) AsMap() map[string]interface{} { +func (x *Struct) AsMap() map[string]any { f := x.GetFields() - vs := make(map[string]interface{}, len(f)) + vs := make(map[string]any, len(f)) for k, v := range f { vs[k] = v.AsInterface() } @@ -306,13 +306,13 @@ type Value struct { // ║ float32, float64 │ stored as NumberValue ║ // ║ string │ stored as StringValue; must be valid UTF-8 ║ // ║ []byte │ stored as StringValue; base64-encoded ║ -// ║ map[string]interface{} │ stored as StructValue ║ -// ║ []interface{} │ stored as ListValue ║ +// ║ map[string]any │ stored as StructValue ║ +// ║ []any │ stored as ListValue ║ // ╚════════════════════════╧════════════════════════════════════════════╝ // // When converting an int64 or uint64 to a NumberValue, numeric precision loss // is possible since they are stored as a float64. -func NewValue(v interface{}) (*Value, error) { +func NewValue(v any) (*Value, error) { switch v := v.(type) { case nil: return NewNullValue(), nil @@ -342,13 +342,13 @@ func NewValue(v interface{}) (*Value, error) { case []byte: s := base64.StdEncoding.EncodeToString(v) return NewStringValue(s), nil - case map[string]interface{}: + case map[string]any: v2, err := NewStruct(v) if err != nil { return nil, err } return NewStructValue(v2), nil - case []interface{}: + case []any: v2, err := NewList(v) if err != nil { return nil, err @@ -396,7 +396,7 @@ func NewListValue(v *ListValue) *Value { // // Floating-point values (i.e., "NaN", "Infinity", and "-Infinity") are // converted as strings to remain compatible with MarshalJSON. -func (x *Value) AsInterface() interface{} { +func (x *Value) AsInterface() any { switch v := x.GetKind().(type) { case *Value_NumberValue: if v != nil { @@ -580,7 +580,7 @@ type ListValue struct { // NewList constructs a ListValue from a general-purpose Go slice. // The slice elements are converted using NewValue. -func NewList(v []interface{}) (*ListValue, error) { +func NewList(v []any) (*ListValue, error) { x := &ListValue{Values: make([]*Value, len(v))} for i, v := range v { var err error @@ -594,9 +594,9 @@ func NewList(v []interface{}) (*ListValue, error) { // AsSlice converts x to a general-purpose Go slice. // The slice elements are converted by calling Value.AsInterface. 
-func (x *ListValue) AsSlice() []interface{} { +func (x *ListValue) AsSlice() []any { vals := x.GetValues() - vs := make([]interface{}, len(vals)) + vs := make([]any, len(vals)) for i, v := range vals { vs[i] = v.AsInterface() } @@ -716,7 +716,7 @@ func file_google_protobuf_struct_proto_rawDescGZIP() []byte { var file_google_protobuf_struct_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_google_protobuf_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_google_protobuf_struct_proto_goTypes = []interface{}{ +var file_google_protobuf_struct_proto_goTypes = []any{ (NullValue)(0), // 0: google.protobuf.NullValue (*Struct)(nil), // 1: google.protobuf.Struct (*Value)(nil), // 2: google.protobuf.Value @@ -743,7 +743,7 @@ func file_google_protobuf_struct_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Struct); i { case 0: return &v.state @@ -755,7 +755,7 @@ func file_google_protobuf_struct_proto_init() { return nil } } - file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*Value); i { case 0: return &v.state @@ -767,7 +767,7 @@ func file_google_protobuf_struct_proto_init() { return nil } } - file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ListValue); i { case 0: return &v.state @@ -780,7 +780,7 @@ func file_google_protobuf_struct_proto_init() { } } } - file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []interface{}{ + file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []any{ (*Value_NullValue)(nil), (*Value_NumberValue)(nil), (*Value_StringValue)(nil), diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 81511a3363e..83a5a645b08 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -332,7 +332,7 @@ func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte { } var file_google_protobuf_timestamp_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_timestamp_proto_goTypes = []interface{}{ +var file_google_protobuf_timestamp_proto_goTypes = []any{ (*Timestamp)(nil), // 0: google.protobuf.Timestamp } var file_google_protobuf_timestamp_proto_depIdxs = []int32{ @@ -349,7 +349,7 @@ func file_google_protobuf_timestamp_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Timestamp); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go index 762a87130f8..e473f826aa3 100644 --- a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go @@ -605,7 +605,7 @@ func 
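As a side note on the structpb surface touched in the hunks above: the constructors and accessors now read naturally with the plain any-based forms. A minimal round-trip sketch, with arbitrary map contents that are not taken from this patch:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// Build a Value from a plain Go map, as described in the package docs above.
	v, err := structpb.NewValue(map[string]any{
		"name":  "example",
		"count": 3,
		"tags":  []any{"a", "b"},
	})
	if err != nil {
		panic(err)
	}
	// Convert back to the general-purpose form; numeric values come back as float64.
	back := v.GetStructValue().AsMap()
	fmt.Println(back["name"], back["count"], back["tags"])
}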
file_google_protobuf_wrappers_proto_rawDescGZIP() []byte { } var file_google_protobuf_wrappers_proto_msgTypes = make([]protoimpl.MessageInfo, 9) -var file_google_protobuf_wrappers_proto_goTypes = []interface{}{ +var file_google_protobuf_wrappers_proto_goTypes = []any{ (*DoubleValue)(nil), // 0: google.protobuf.DoubleValue (*FloatValue)(nil), // 1: google.protobuf.FloatValue (*Int64Value)(nil), // 2: google.protobuf.Int64Value @@ -630,7 +630,7 @@ func file_google_protobuf_wrappers_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*DoubleValue); i { case 0: return &v.state @@ -642,7 +642,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*FloatValue); i { case 0: return &v.state @@ -654,7 +654,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*Int64Value); i { case 0: return &v.state @@ -666,7 +666,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*UInt64Value); i { case 0: return &v.state @@ -678,7 +678,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*Int32Value); i { case 0: return &v.state @@ -690,7 +690,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*UInt32Value); i { case 0: return &v.state @@ -702,7 +702,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*BoolValue); i { case 0: return &v.state @@ -714,7 +714,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*StringValue); i { case 0: return &v.state @@ -726,7 +726,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*BytesValue); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go 
b/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go index 620728f8cca..9066bcc7f3b 100644 --- a/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go +++ b/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go @@ -591,7 +591,7 @@ func file_google_protobuf_compiler_plugin_proto_rawDescGZIP() []byte { var file_google_protobuf_compiler_plugin_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_google_protobuf_compiler_plugin_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_google_protobuf_compiler_plugin_proto_goTypes = []interface{}{ +var file_google_protobuf_compiler_plugin_proto_goTypes = []any{ (CodeGeneratorResponse_Feature)(0), // 0: google.protobuf.compiler.CodeGeneratorResponse.Feature (*Version)(nil), // 1: google.protobuf.compiler.Version (*CodeGeneratorRequest)(nil), // 2: google.protobuf.compiler.CodeGeneratorRequest @@ -619,7 +619,7 @@ func file_google_protobuf_compiler_plugin_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_compiler_plugin_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_compiler_plugin_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Version); i { case 0: return &v.state @@ -631,7 +631,7 @@ func file_google_protobuf_compiler_plugin_proto_init() { return nil } } - file_google_protobuf_compiler_plugin_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_compiler_plugin_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*CodeGeneratorRequest); i { case 0: return &v.state @@ -643,7 +643,7 @@ func file_google_protobuf_compiler_plugin_proto_init() { return nil } } - file_google_protobuf_compiler_plugin_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_compiler_plugin_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*CodeGeneratorResponse); i { case 0: return &v.state @@ -655,7 +655,7 @@ func file_google_protobuf_compiler_plugin_proto_init() { return nil } } - file_google_protobuf_compiler_plugin_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_compiler_plugin_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*CodeGeneratorResponse_File); i { case 0: return &v.state diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go index b1c5f6f4c09..6556eda65d5 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go @@ -70,6 +70,12 @@ type CustomResourceDefinitionSpec struct { // Top-level and per-version columns are mutually exclusive. // +optional AdditionalPrinterColumns []CustomResourceColumnDefinition + // selectableFields specifies paths to fields that may be used as field selectors. + // A maximum of 8 selectable fields are allowed. + // See https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors + // Top-level and per-version columns are mutually exclusive. + // +optional + SelectableFields []SelectableField // `conversion` defines conversion settings for the CRD. 
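For reference, the selectableFields field introduced in the apiextensions types in this hunk can be populated as in the minimal sketch below; the version name and the .spec.color JSON path are hypothetical placeholders, not values from this patch:

package main

import (
	"fmt"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
)

// exampleVersion sketches a served CRD version that exposes .spec.color as a
// field selector. The JSON path is a hypothetical placeholder.
func exampleVersion() apiextensionsv1.CustomResourceDefinitionVersion {
	return apiextensionsv1.CustomResourceDefinitionVersion{
		Name:    "v1",
		Served:  true,
		Storage: true,
		SelectableFields: []apiextensionsv1.SelectableField{
			{JSONPath: ".spec.color"},
		},
	}
}

func main() {
	fmt.Println(exampleVersion().SelectableFields[0].JSONPath)
}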
Conversion *CustomResourceConversion @@ -207,6 +213,25 @@ type CustomResourceDefinitionVersion struct { // be explicitly set to null // +optional AdditionalPrinterColumns []CustomResourceColumnDefinition + + // selectableFields specifies paths to fields that may be used as field selectors. + // A maximum of 8 selectable fields are allowed. + // See https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors + // +optional + SelectableFields []SelectableField +} + +// SelectableField specifies the JSON path of a field that may be used with field selectors. +type SelectableField struct { + // jsonPath is a simple JSON path which is evaluated against each custom resource to produce a + // field selector value. + // Only JSON paths without the array notation are allowed. + // Must point to a field of type string, boolean or integer. Types with enum values + // and strings with formats are allowed. + // If jsonPath refers to absent field in a resource, the jsonPath evaluates to an empty string. + // Must not point to metdata fields. + // Required. + JSONPath string } // CustomResourceColumnDefinition specifies a column for server side printing. diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/conversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/conversion.go index 4d29ff8235d..2ca72bb16b3 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/conversion.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/conversion.go @@ -80,7 +80,7 @@ func Convert_apiextensions_CustomResourceDefinitionSpec_To_v1_CustomResourceDefi out.Versions = []CustomResourceDefinitionVersion{{Name: in.Version, Served: true, Storage: true}} } - // If spec.{subresources,validation,additionalPrinterColumns} exists, move to versions + // If spec.{subresources,validation,additionalPrinterColumns,selectableFields} exists, move to versions if in.Subresources != nil { subresources := &CustomResourceSubresources{} if err := Convert_apiextensions_CustomResourceSubresources_To_v1_CustomResourceSubresources(in.Subresources, subresources, s); err != nil { @@ -110,6 +110,17 @@ func Convert_apiextensions_CustomResourceDefinitionSpec_To_v1_CustomResourceDefi out.Versions[i].AdditionalPrinterColumns = additionalPrinterColumns } } + if in.SelectableFields != nil { + selectableFields := make([]SelectableField, len(in.SelectableFields)) + for i := range in.SelectableFields { + if err := Convert_apiextensions_SelectableField_To_v1_SelectableField(&in.SelectableFields[i], &selectableFields[i], s); err != nil { + return err + } + } + for i := range out.Versions { + out.Versions[i].SelectableFields = selectableFields + } + } return nil } @@ -125,13 +136,15 @@ func Convert_v1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefi // Copy versions[0] to version out.Version = out.Versions[0].Name - // If versions[*].{subresources,schema,additionalPrinterColumns} are identical, move to spec + // If versions[*].{subresources,schema,additionalPrinterColumns,selectableFields} are identical, move to spec subresources := out.Versions[0].Subresources subresourcesIdentical := true validation := out.Versions[0].Schema validationIdentical := true additionalPrinterColumns := out.Versions[0].AdditionalPrinterColumns additionalPrinterColumnsIdentical := true + selectableFields := out.Versions[0].SelectableFields + selectableFieldsIdentical := true // Detect if per-version fields are identical for _, v := range out.Versions { @@ -144,6 
+157,9 @@ func Convert_v1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefi if additionalPrinterColumnsIdentical && !apiequality.Semantic.DeepEqual(v.AdditionalPrinterColumns, additionalPrinterColumns) { additionalPrinterColumnsIdentical = false } + if selectableFieldsIdentical && !apiequality.Semantic.DeepEqual(v.SelectableFields, selectableFields) { + selectableFieldsIdentical = false + } } // If they are, set the top-level fields and clear the per-version fields @@ -156,6 +172,9 @@ func Convert_v1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefi if additionalPrinterColumnsIdentical { out.AdditionalPrinterColumns = additionalPrinterColumns } + if selectableFieldsIdentical { + out.SelectableFields = selectableFields + } for i := range out.Versions { if subresourcesIdentical { out.Versions[i].Subresources = nil @@ -166,6 +185,9 @@ func Convert_v1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefi if additionalPrinterColumnsIdentical { out.Versions[i].AdditionalPrinterColumns = nil } + if selectableFieldsIdentical { + out.Versions[i].SelectableFields = nil + } } return nil diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go index 6c22a516985..8e081e4b1c5 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto +// source: k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto package v1 @@ -51,7 +51,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *ConversionRequest) Reset() { *m = ConversionRequest{} } func (*ConversionRequest) ProtoMessage() {} func (*ConversionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{0} + return fileDescriptor_c5e101a0235c8c62, []int{0} } func (m *ConversionRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -79,7 +79,7 @@ var xxx_messageInfo_ConversionRequest proto.InternalMessageInfo func (m *ConversionResponse) Reset() { *m = ConversionResponse{} } func (*ConversionResponse) ProtoMessage() {} func (*ConversionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{1} + return fileDescriptor_c5e101a0235c8c62, []int{1} } func (m *ConversionResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -107,7 +107,7 @@ var xxx_messageInfo_ConversionResponse proto.InternalMessageInfo func (m *ConversionReview) Reset() { *m = ConversionReview{} } func (*ConversionReview) ProtoMessage() {} func (*ConversionReview) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{2} + return fileDescriptor_c5e101a0235c8c62, []int{2} } func (m *ConversionReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -135,7 +135,7 @@ var xxx_messageInfo_ConversionReview proto.InternalMessageInfo func (m *CustomResourceColumnDefinition) Reset() { *m = CustomResourceColumnDefinition{} } func (*CustomResourceColumnDefinition) ProtoMessage() {} func (*CustomResourceColumnDefinition) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{3} + return fileDescriptor_c5e101a0235c8c62, []int{3} } 
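The spec/version round-trip above extends the existing hoisting pattern to selectableFields: when every version carries identical values, they move to the top-level spec and the per-version copies are cleared. A self-contained sketch of that check, using simplified stand-in types rather than the real API structs:

package main

import (
	"fmt"
	"reflect"
)

// Simplified stand-ins for the real apiextensions types, for illustration only.
type selectableField struct{ JSONPath string }

type version struct {
	Name             string
	SelectableFields []selectableField
}

// hoistIfIdentical mirrors the idea in the conversion above: if all versions
// share the same selectableFields, return them once and clear the per-version
// copies; otherwise leave the versions untouched.
func hoistIfIdentical(versions []version) ([]selectableField, []version) {
	if len(versions) == 0 {
		return nil, versions
	}
	top := versions[0].SelectableFields
	for _, v := range versions {
		if !reflect.DeepEqual(v.SelectableFields, top) {
			return nil, versions // not identical: keep per-version fields
		}
	}
	for i := range versions {
		versions[i].SelectableFields = nil
	}
	return top, versions
}

func main() {
	vs := []version{
		{Name: "v1", SelectableFields: []selectableField{{JSONPath: ".spec.color"}}},
		{Name: "v1beta1", SelectableFields: []selectableField{{JSONPath: ".spec.color"}}},
	}
	top, vs := hoistIfIdentical(vs)
	fmt.Println(top, vs[0].SelectableFields == nil)
}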
func (m *CustomResourceColumnDefinition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -163,7 +163,7 @@ var xxx_messageInfo_CustomResourceColumnDefinition proto.InternalMessageInfo func (m *CustomResourceConversion) Reset() { *m = CustomResourceConversion{} } func (*CustomResourceConversion) ProtoMessage() {} func (*CustomResourceConversion) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{4} + return fileDescriptor_c5e101a0235c8c62, []int{4} } func (m *CustomResourceConversion) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -191,7 +191,7 @@ var xxx_messageInfo_CustomResourceConversion proto.InternalMessageInfo func (m *CustomResourceDefinition) Reset() { *m = CustomResourceDefinition{} } func (*CustomResourceDefinition) ProtoMessage() {} func (*CustomResourceDefinition) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{5} + return fileDescriptor_c5e101a0235c8c62, []int{5} } func (m *CustomResourceDefinition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -219,7 +219,7 @@ var xxx_messageInfo_CustomResourceDefinition proto.InternalMessageInfo func (m *CustomResourceDefinitionCondition) Reset() { *m = CustomResourceDefinitionCondition{} } func (*CustomResourceDefinitionCondition) ProtoMessage() {} func (*CustomResourceDefinitionCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{6} + return fileDescriptor_c5e101a0235c8c62, []int{6} } func (m *CustomResourceDefinitionCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -247,7 +247,7 @@ var xxx_messageInfo_CustomResourceDefinitionCondition proto.InternalMessageInfo func (m *CustomResourceDefinitionList) Reset() { *m = CustomResourceDefinitionList{} } func (*CustomResourceDefinitionList) ProtoMessage() {} func (*CustomResourceDefinitionList) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{7} + return fileDescriptor_c5e101a0235c8c62, []int{7} } func (m *CustomResourceDefinitionList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -275,7 +275,7 @@ var xxx_messageInfo_CustomResourceDefinitionList proto.InternalMessageInfo func (m *CustomResourceDefinitionNames) Reset() { *m = CustomResourceDefinitionNames{} } func (*CustomResourceDefinitionNames) ProtoMessage() {} func (*CustomResourceDefinitionNames) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{8} + return fileDescriptor_c5e101a0235c8c62, []int{8} } func (m *CustomResourceDefinitionNames) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -303,7 +303,7 @@ var xxx_messageInfo_CustomResourceDefinitionNames proto.InternalMessageInfo func (m *CustomResourceDefinitionSpec) Reset() { *m = CustomResourceDefinitionSpec{} } func (*CustomResourceDefinitionSpec) ProtoMessage() {} func (*CustomResourceDefinitionSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{9} + return fileDescriptor_c5e101a0235c8c62, []int{9} } func (m *CustomResourceDefinitionSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -331,7 +331,7 @@ var xxx_messageInfo_CustomResourceDefinitionSpec proto.InternalMessageInfo func (m *CustomResourceDefinitionStatus) Reset() { *m = CustomResourceDefinitionStatus{} } func (*CustomResourceDefinitionStatus) ProtoMessage() {} func (*CustomResourceDefinitionStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{10} + return fileDescriptor_c5e101a0235c8c62, []int{10} } func (m 
*CustomResourceDefinitionStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -359,7 +359,7 @@ var xxx_messageInfo_CustomResourceDefinitionStatus proto.InternalMessageInfo func (m *CustomResourceDefinitionVersion) Reset() { *m = CustomResourceDefinitionVersion{} } func (*CustomResourceDefinitionVersion) ProtoMessage() {} func (*CustomResourceDefinitionVersion) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{11} + return fileDescriptor_c5e101a0235c8c62, []int{11} } func (m *CustomResourceDefinitionVersion) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -387,7 +387,7 @@ var xxx_messageInfo_CustomResourceDefinitionVersion proto.InternalMessageInfo func (m *CustomResourceSubresourceScale) Reset() { *m = CustomResourceSubresourceScale{} } func (*CustomResourceSubresourceScale) ProtoMessage() {} func (*CustomResourceSubresourceScale) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{12} + return fileDescriptor_c5e101a0235c8c62, []int{12} } func (m *CustomResourceSubresourceScale) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -415,7 +415,7 @@ var xxx_messageInfo_CustomResourceSubresourceScale proto.InternalMessageInfo func (m *CustomResourceSubresourceStatus) Reset() { *m = CustomResourceSubresourceStatus{} } func (*CustomResourceSubresourceStatus) ProtoMessage() {} func (*CustomResourceSubresourceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{13} + return fileDescriptor_c5e101a0235c8c62, []int{13} } func (m *CustomResourceSubresourceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -443,7 +443,7 @@ var xxx_messageInfo_CustomResourceSubresourceStatus proto.InternalMessageInfo func (m *CustomResourceSubresources) Reset() { *m = CustomResourceSubresources{} } func (*CustomResourceSubresources) ProtoMessage() {} func (*CustomResourceSubresources) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{14} + return fileDescriptor_c5e101a0235c8c62, []int{14} } func (m *CustomResourceSubresources) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -471,7 +471,7 @@ var xxx_messageInfo_CustomResourceSubresources proto.InternalMessageInfo func (m *CustomResourceValidation) Reset() { *m = CustomResourceValidation{} } func (*CustomResourceValidation) ProtoMessage() {} func (*CustomResourceValidation) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{15} + return fileDescriptor_c5e101a0235c8c62, []int{15} } func (m *CustomResourceValidation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -499,7 +499,7 @@ var xxx_messageInfo_CustomResourceValidation proto.InternalMessageInfo func (m *ExternalDocumentation) Reset() { *m = ExternalDocumentation{} } func (*ExternalDocumentation) ProtoMessage() {} func (*ExternalDocumentation) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{16} + return fileDescriptor_c5e101a0235c8c62, []int{16} } func (m *ExternalDocumentation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -527,7 +527,7 @@ var xxx_messageInfo_ExternalDocumentation proto.InternalMessageInfo func (m *JSON) Reset() { *m = JSON{} } func (*JSON) ProtoMessage() {} func (*JSON) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{17} + return fileDescriptor_c5e101a0235c8c62, []int{17} } func (m *JSON) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -555,7 +555,7 @@ var xxx_messageInfo_JSON proto.InternalMessageInfo func (m 
*JSONSchemaProps) Reset() { *m = JSONSchemaProps{} } func (*JSONSchemaProps) ProtoMessage() {} func (*JSONSchemaProps) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{18} + return fileDescriptor_c5e101a0235c8c62, []int{18} } func (m *JSONSchemaProps) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -583,7 +583,7 @@ var xxx_messageInfo_JSONSchemaProps proto.InternalMessageInfo func (m *JSONSchemaPropsOrArray) Reset() { *m = JSONSchemaPropsOrArray{} } func (*JSONSchemaPropsOrArray) ProtoMessage() {} func (*JSONSchemaPropsOrArray) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{19} + return fileDescriptor_c5e101a0235c8c62, []int{19} } func (m *JSONSchemaPropsOrArray) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -611,7 +611,7 @@ var xxx_messageInfo_JSONSchemaPropsOrArray proto.InternalMessageInfo func (m *JSONSchemaPropsOrBool) Reset() { *m = JSONSchemaPropsOrBool{} } func (*JSONSchemaPropsOrBool) ProtoMessage() {} func (*JSONSchemaPropsOrBool) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{20} + return fileDescriptor_c5e101a0235c8c62, []int{20} } func (m *JSONSchemaPropsOrBool) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -639,7 +639,7 @@ var xxx_messageInfo_JSONSchemaPropsOrBool proto.InternalMessageInfo func (m *JSONSchemaPropsOrStringArray) Reset() { *m = JSONSchemaPropsOrStringArray{} } func (*JSONSchemaPropsOrStringArray) ProtoMessage() {} func (*JSONSchemaPropsOrStringArray) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{21} + return fileDescriptor_c5e101a0235c8c62, []int{21} } func (m *JSONSchemaPropsOrStringArray) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -664,10 +664,38 @@ func (m *JSONSchemaPropsOrStringArray) XXX_DiscardUnknown() { var xxx_messageInfo_JSONSchemaPropsOrStringArray proto.InternalMessageInfo +func (m *SelectableField) Reset() { *m = SelectableField{} } +func (*SelectableField) ProtoMessage() {} +func (*SelectableField) Descriptor() ([]byte, []int) { + return fileDescriptor_c5e101a0235c8c62, []int{22} +} +func (m *SelectableField) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelectableField) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelectableField) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelectableField.Merge(m, src) +} +func (m *SelectableField) XXX_Size() int { + return m.Size() +} +func (m *SelectableField) XXX_DiscardUnknown() { + xxx_messageInfo_SelectableField.DiscardUnknown(m) +} + +var xxx_messageInfo_SelectableField proto.InternalMessageInfo + func (m *ServiceReference) Reset() { *m = ServiceReference{} } func (*ServiceReference) ProtoMessage() {} func (*ServiceReference) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{22} + return fileDescriptor_c5e101a0235c8c62, []int{23} } func (m *ServiceReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -695,7 +723,7 @@ var xxx_messageInfo_ServiceReference proto.InternalMessageInfo func (m *ValidationRule) Reset() { *m = ValidationRule{} } func (*ValidationRule) ProtoMessage() {} func (*ValidationRule) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{23} + return fileDescriptor_c5e101a0235c8c62, []int{24} } func (m *ValidationRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -723,7 
+751,7 @@ var xxx_messageInfo_ValidationRule proto.InternalMessageInfo func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } func (*WebhookClientConfig) ProtoMessage() {} func (*WebhookClientConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{24} + return fileDescriptor_c5e101a0235c8c62, []int{25} } func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -751,7 +779,7 @@ var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo func (m *WebhookConversion) Reset() { *m = WebhookConversion{} } func (*WebhookConversion) ProtoMessage() {} func (*WebhookConversion) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{25} + return fileDescriptor_c5e101a0235c8c62, []int{26} } func (m *WebhookConversion) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -803,6 +831,7 @@ func init() { proto.RegisterType((*JSONSchemaPropsOrArray)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrArray") proto.RegisterType((*JSONSchemaPropsOrBool)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrBool") proto.RegisterType((*JSONSchemaPropsOrStringArray)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrStringArray") + proto.RegisterType((*SelectableField)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.SelectableField") proto.RegisterType((*ServiceReference)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.ServiceReference") proto.RegisterType((*ValidationRule)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.ValidationRule") proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.WebhookClientConfig") @@ -810,208 +839,209 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto", fileDescriptor_f5a35c9667703937) -} - -var fileDescriptor_f5a35c9667703937 = []byte{ - // 3137 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x5a, 0xdf, 0x6f, 0x5c, 0x47, - 0xf5, 0xcf, 0x5d, 0x7b, 0xed, 0xf5, 0xd8, 0x89, 0xed, 0x49, 0xec, 0xef, 0x8d, 0x9b, 0x78, 0x9d, - 0xcd, 0xb7, 0xc1, 0x6d, 0xd3, 0x75, 0x1b, 0x5a, 0x1a, 0xca, 0x2f, 0x79, 0x6d, 0xa7, 0x75, 0x13, - 0xc7, 0xd6, 0x6c, 0x92, 0xba, 0x2d, 0xa2, 0xbd, 0xde, 0x3b, 0xbb, 0xbe, 0xf5, 0xfd, 0x95, 0x99, - 0x7b, 0xfd, 0x43, 0x02, 0xa9, 0x02, 0x55, 0x40, 0x25, 0x28, 0x0f, 0xa8, 0x3c, 0x21, 0x84, 0x50, - 0x1f, 0xe0, 0x01, 0xde, 0xe0, 0x5f, 0xe8, 0x0b, 0x52, 0x25, 0x24, 0x54, 0x09, 0x69, 0x45, 0x97, - 0x7f, 0x00, 0x09, 0x10, 0xc2, 0x0f, 0x08, 0xcd, 0x8f, 0x3b, 0x77, 0xf6, 0xee, 0x6e, 0x12, 0xd9, - 0xeb, 0xf6, 0x6d, 0xf7, 0x9c, 0x33, 0xe7, 0x73, 0xe6, 0xcc, 0x99, 0x33, 0x67, 0xce, 0x1d, 0x60, - 0xed, 0x5c, 0xa7, 0x65, 0x27, 0x58, 0xd8, 0x89, 0xb7, 0x30, 0xf1, 0x71, 0x84, 0xe9, 0xc2, 0x2e, - 0xf6, 0xed, 0x80, 0x2c, 0x48, 0x86, 0x15, 0x3a, 0x78, 0x3f, 0xc2, 0x3e, 0x75, 0x02, 0x9f, 0x3e, - 0x6d, 0x85, 0x0e, 0xc5, 0x64, 0x17, 0x93, 0x85, 0x70, 0xa7, 0xc1, 0x78, 0xb4, 0x5d, 0x60, 0x61, - 0xf7, 0xd9, 0x85, 0x06, 0xf6, 0x31, 0xb1, 0x22, 0x6c, 0x97, 0x43, 0x12, 0x44, 0x01, 0xbc, 0x2e, - 0x34, 0x95, 0xdb, 0x04, 0xdf, 0x54, 0x9a, 0xca, 0xe1, 0x4e, 0x83, 0xf1, 0x68, 0xbb, 0x40, 0x79, - 0xf7, 0xd9, 0x99, 0xa7, 0x1b, 0x4e, 0xb4, 0x1d, 0x6f, 0x95, 0x6b, 0x81, 0xb7, 0xd0, 0x08, 0x1a, - 0xc1, 0x02, 0x57, 0xb8, 0x15, 0xd7, 0xf9, 0x3f, 0xfe, 0x87, 
0xff, 0x12, 0x40, 0x33, 0xcf, 0xa5, - 0x26, 0x7b, 0x56, 0x6d, 0xdb, 0xf1, 0x31, 0x39, 0x48, 0xed, 0xf4, 0x70, 0x64, 0x75, 0x31, 0x6f, - 0x66, 0xa1, 0xd7, 0x28, 0x12, 0xfb, 0x91, 0xe3, 0xe1, 0x8e, 0x01, 0x5f, 0x7a, 0xd8, 0x00, 0x5a, - 0xdb, 0xc6, 0x9e, 0x95, 0x1d, 0x57, 0x3a, 0x34, 0xc0, 0xe4, 0x52, 0xe0, 0xef, 0x62, 0xc2, 0x26, - 0x88, 0xf0, 0xfd, 0x18, 0xd3, 0x08, 0x56, 0xc0, 0x40, 0xec, 0xd8, 0xa6, 0x31, 0x67, 0xcc, 0x8f, - 0x54, 0x9e, 0xf9, 0xa8, 0x59, 0x3c, 0xd5, 0x6a, 0x16, 0x07, 0xee, 0xae, 0x2e, 0x1f, 0x36, 0x8b, - 0x97, 0x7a, 0x21, 0x45, 0x07, 0x21, 0xa6, 0xe5, 0xbb, 0xab, 0xcb, 0x88, 0x0d, 0x86, 0x2f, 0x81, - 0x49, 0x1b, 0x53, 0x87, 0x60, 0x7b, 0x71, 0x63, 0xf5, 0x9e, 0xd0, 0x6f, 0xe6, 0xb8, 0xc6, 0xf3, - 0x52, 0xe3, 0xe4, 0x72, 0x56, 0x00, 0x75, 0x8e, 0x81, 0x9b, 0x60, 0x38, 0xd8, 0x7a, 0x1b, 0xd7, - 0x22, 0x6a, 0x0e, 0xcc, 0x0d, 0xcc, 0x8f, 0x5e, 0x7b, 0xba, 0x9c, 0x2e, 0x9e, 0x32, 0x81, 0xaf, - 0x98, 0x9c, 0x6c, 0x19, 0x59, 0x7b, 0x2b, 0xc9, 0xa2, 0x55, 0xc6, 0x25, 0xda, 0xf0, 0xba, 0xd0, - 0x82, 0x12, 0x75, 0xa5, 0x5f, 0xe5, 0x00, 0xd4, 0x27, 0x4f, 0xc3, 0xc0, 0xa7, 0xb8, 0x2f, 0xb3, - 0xa7, 0x60, 0xa2, 0xc6, 0x35, 0x47, 0xd8, 0x96, 0xb8, 0x66, 0xee, 0x28, 0xd6, 0x9b, 0x12, 0x7f, - 0x62, 0x29, 0xa3, 0x0e, 0x75, 0x00, 0xc0, 0x3b, 0x60, 0x88, 0x60, 0x1a, 0xbb, 0x91, 0x39, 0x30, - 0x67, 0xcc, 0x8f, 0x5e, 0xbb, 0xda, 0x13, 0x8a, 0x87, 0x36, 0x0b, 0xbe, 0xf2, 0xee, 0xb3, 0xe5, - 0x6a, 0x64, 0x45, 0x31, 0xad, 0x9c, 0x91, 0x48, 0x43, 0x88, 0xeb, 0x40, 0x52, 0x57, 0xe9, 0xbf, - 0x06, 0x98, 0xd0, 0xbd, 0xb4, 0xeb, 0xe0, 0x3d, 0x48, 0xc0, 0x30, 0x11, 0xc1, 0xc2, 0xfd, 0x34, - 0x7a, 0xed, 0x66, 0xf9, 0xa8, 0x3b, 0xaa, 0xdc, 0x11, 0x7f, 0x95, 0x51, 0xb6, 0x5c, 0xf2, 0x0f, - 0x4a, 0x80, 0xe0, 0x2e, 0x28, 0x10, 0xb9, 0x46, 0x3c, 0x90, 0x46, 0xaf, 0xdd, 0xea, 0x0f, 0xa8, - 0xd0, 0x59, 0x19, 0x6b, 0x35, 0x8b, 0x85, 0xe4, 0x1f, 0x52, 0x58, 0xa5, 0x5f, 0xe4, 0xc0, 0xec, - 0x52, 0x4c, 0xa3, 0xc0, 0x43, 0x98, 0x06, 0x31, 0xa9, 0xe1, 0xa5, 0xc0, 0x8d, 0x3d, 0x7f, 0x19, - 0xd7, 0x1d, 0xdf, 0x89, 0x58, 0x8c, 0xce, 0x81, 0x41, 0xdf, 0xf2, 0xb0, 0x8c, 0x99, 0x31, 0xe9, - 0xc9, 0xc1, 0xdb, 0x96, 0x87, 0x11, 0xe7, 0x30, 0x09, 0x16, 0x22, 0x72, 0x07, 0x28, 0x89, 0x3b, - 0x07, 0x21, 0x46, 0x9c, 0x03, 0xaf, 0x80, 0xa1, 0x7a, 0x40, 0x3c, 0x4b, 0xac, 0xde, 0x48, 0xba, - 0x1e, 0x37, 0x38, 0x15, 0x49, 0x2e, 0x7c, 0x1e, 0x8c, 0xda, 0x98, 0xd6, 0x88, 0x13, 0x32, 0x68, - 0x73, 0x90, 0x0b, 0x9f, 0x95, 0xc2, 0xa3, 0xcb, 0x29, 0x0b, 0xe9, 0x72, 0xf0, 0x2a, 0x28, 0x84, - 0xc4, 0x09, 0x88, 0x13, 0x1d, 0x98, 0xf9, 0x39, 0x63, 0x3e, 0x5f, 0x99, 0x90, 0x63, 0x0a, 0x1b, - 0x92, 0x8e, 0x94, 0x04, 0x93, 0x7e, 0x9b, 0x06, 0xfe, 0x86, 0x15, 0x6d, 0x9b, 0x43, 0x1c, 0x41, - 0x49, 0xbf, 0x52, 0x5d, 0xbf, 0xcd, 0xe8, 0x48, 0x49, 0x94, 0xfe, 0x6c, 0x00, 0x33, 0xeb, 0xa1, - 0xc4, 0xbd, 0xf0, 0x06, 0x28, 0xd0, 0x88, 0xe5, 0x9c, 0xc6, 0x81, 0xf4, 0xcf, 0x93, 0x89, 0xaa, - 0xaa, 0xa4, 0x1f, 0x36, 0x8b, 0xd3, 0xe9, 0x88, 0x84, 0xca, 0x7d, 0xa3, 0xc6, 0xb2, 0x90, 0xdb, - 0xc3, 0x5b, 0xdb, 0x41, 0xb0, 0x23, 0x57, 0xff, 0x18, 0x21, 0xf7, 0xaa, 0x50, 0x94, 0x62, 0x8a, - 0x90, 0x93, 0x64, 0x94, 0x00, 0x95, 0xfe, 0x93, 0xcb, 0x4e, 0x4c, 0x5b, 0xf4, 0xb7, 0x40, 0x81, - 0x6d, 0x21, 0xdb, 0x8a, 0x2c, 0xb9, 0x09, 0x9e, 0x79, 0xb4, 0x0d, 0x27, 0xf6, 0xeb, 0x1a, 0x8e, - 0xac, 0x0a, 0x94, 0xae, 0x00, 0x29, 0x0d, 0x29, 0xad, 0x70, 0x1f, 0x0c, 0xd2, 0x10, 0xd7, 0xe4, - 0x7c, 0xef, 0x1d, 0x23, 0xda, 0x7b, 0xcc, 0xa1, 0x1a, 0xe2, 0x5a, 0x1a, 0x8c, 0xec, 0x1f, 0xe2, - 0x88, 0xf0, 0x1d, 0x03, 0x0c, 0x51, 0x9e, 0x17, 0x64, 0x2e, 0xd9, 0x3c, 0x01, 0xf0, 
0x4c, 0xde, - 0x11, 0xff, 0x91, 0xc4, 0x2d, 0xfd, 0x33, 0x07, 0x2e, 0xf5, 0x1a, 0xba, 0x14, 0xf8, 0xb6, 0x58, - 0x84, 0x55, 0xb9, 0xaf, 0x44, 0x64, 0x3d, 0xaf, 0xef, 0xab, 0xc3, 0x66, 0xf1, 0xf1, 0x87, 0x2a, - 0xd0, 0x36, 0xe0, 0x97, 0xd5, 0x94, 0xc5, 0x26, 0xbd, 0xd4, 0x6e, 0xd8, 0x61, 0xb3, 0x38, 0xae, - 0x86, 0xb5, 0xdb, 0x0a, 0x77, 0x01, 0x74, 0x2d, 0x1a, 0xdd, 0x21, 0x96, 0x4f, 0x85, 0x5a, 0xc7, - 0xc3, 0xd2, 0x73, 0x4f, 0x3e, 0x5a, 0x50, 0xb0, 0x11, 0x95, 0x19, 0x09, 0x09, 0x6f, 0x75, 0x68, - 0x43, 0x5d, 0x10, 0x58, 0xce, 0x20, 0xd8, 0xa2, 0x2a, 0x0d, 0x68, 0x39, 0x9c, 0x51, 0x91, 0xe4, - 0xc2, 0x27, 0xc0, 0xb0, 0x87, 0x29, 0xb5, 0x1a, 0x98, 0xef, 0xfd, 0x91, 0xf4, 0x50, 0x5c, 0x13, - 0x64, 0x94, 0xf0, 0x4b, 0xff, 0x32, 0xc0, 0x85, 0x5e, 0x5e, 0xbb, 0xe5, 0xd0, 0x08, 0x7e, 0xb3, - 0x23, 0xec, 0xcb, 0x8f, 0x36, 0x43, 0x36, 0x9a, 0x07, 0xbd, 0x4a, 0x25, 0x09, 0x45, 0x0b, 0xf9, - 0x3d, 0x90, 0x77, 0x22, 0xec, 0x25, 0xa7, 0x25, 0xea, 0x7f, 0xd8, 0x55, 0x4e, 0x4b, 0xf8, 0xfc, - 0x2a, 0x03, 0x42, 0x02, 0xaf, 0xf4, 0x61, 0x0e, 0x5c, 0xec, 0x35, 0x84, 0xe5, 0x71, 0xca, 0x9c, - 0x1d, 0xba, 0x31, 0xb1, 0x5c, 0x19, 0x6c, 0xca, 0xd9, 0x1b, 0x9c, 0x8a, 0x24, 0x97, 0xe5, 0x4e, - 0xea, 0xf8, 0x8d, 0xd8, 0xb5, 0x88, 0x8c, 0x24, 0x35, 0xe1, 0xaa, 0xa4, 0x23, 0x25, 0x01, 0xcb, - 0x00, 0xd0, 0xed, 0x80, 0x44, 0x1c, 0x83, 0x57, 0x38, 0x23, 0x95, 0x33, 0x2c, 0x23, 0x54, 0x15, - 0x15, 0x69, 0x12, 0xec, 0x20, 0xd9, 0x71, 0x7c, 0x5b, 0x2e, 0xb8, 0xda, 0xbb, 0x37, 0x1d, 0xdf, - 0x46, 0x9c, 0xc3, 0xf0, 0x5d, 0x87, 0x46, 0x8c, 0x22, 0x57, 0xbb, 0xcd, 0xe1, 0x5c, 0x52, 0x49, - 0x30, 0xfc, 0x1a, 0x4b, 0xb0, 0x01, 0x71, 0x30, 0x35, 0x87, 0x52, 0xfc, 0x25, 0x45, 0x45, 0x9a, - 0x44, 0xe9, 0x2f, 0x83, 0xbd, 0xe3, 0x83, 0x25, 0x10, 0x78, 0x19, 0xe4, 0x1b, 0x24, 0x88, 0x43, - 0xe9, 0x25, 0xe5, 0xed, 0x97, 0x18, 0x11, 0x09, 0x1e, 0xfc, 0x36, 0xc8, 0xfb, 0x72, 0xc2, 0x2c, - 0x82, 0x5e, 0xed, 0xff, 0x32, 0x73, 0x6f, 0xa5, 0xe8, 0xc2, 0x91, 0x02, 0x14, 0x3e, 0x07, 0xf2, - 0xb4, 0x16, 0x84, 0x58, 0x3a, 0x71, 0x36, 0x11, 0xaa, 0x32, 0xe2, 0x61, 0xb3, 0x78, 0x3a, 0x51, - 0xc7, 0x09, 0x48, 0x08, 0xc3, 0xef, 0x1b, 0xa0, 0x20, 0x8f, 0x0b, 0x6a, 0x0e, 0xf3, 0xf0, 0x7c, - 0xad, 0xff, 0x76, 0xcb, 0xb2, 0x37, 0x5d, 0x33, 0x49, 0xa0, 0x48, 0x81, 0xc3, 0xef, 0x1a, 0x00, - 0xd4, 0xd4, 0xd9, 0x65, 0x8e, 0x70, 0x1f, 0xf6, 0x6d, 0xab, 0x68, 0xa7, 0xa2, 0x08, 0x84, 0xb4, - 0x54, 0xd2, 0x50, 0x61, 0x15, 0x4c, 0x85, 0x04, 0x73, 0xdd, 0x77, 0xfd, 0x1d, 0x3f, 0xd8, 0xf3, - 0x6f, 0x38, 0xd8, 0xb5, 0xa9, 0x09, 0xe6, 0x8c, 0xf9, 0x42, 0xe5, 0xa2, 0xb4, 0x7f, 0x6a, 0xa3, - 0x9b, 0x10, 0xea, 0x3e, 0xb6, 0xf4, 0xee, 0x40, 0xb6, 0xd6, 0xca, 0x9e, 0x17, 0xf0, 0x7d, 0x31, - 0x79, 0x91, 0x87, 0xa9, 0x69, 0xf0, 0x85, 0x78, 0xa3, 0xff, 0x0b, 0xa1, 0x72, 0x7d, 0x7a, 0x48, - 0x2b, 0x12, 0x45, 0x9a, 0x09, 0xf0, 0xa7, 0x06, 0x38, 0x6d, 0xd5, 0x6a, 0x38, 0x8c, 0xb0, 0x2d, - 0xb6, 0x71, 0xee, 0x64, 0xa3, 0x7a, 0x4a, 0x1a, 0x74, 0x7a, 0x51, 0x47, 0x45, 0xed, 0x46, 0xc0, - 0x17, 0xc1, 0x19, 0x1a, 0x05, 0x04, 0xdb, 0x49, 0x04, 0xc9, 0xec, 0x02, 0x5b, 0xcd, 0xe2, 0x99, - 0x6a, 0x1b, 0x07, 0x65, 0x24, 0x4b, 0x1f, 0xe7, 0x41, 0xf1, 0x21, 0x11, 0xfa, 0x08, 0x45, 0xef, - 0x15, 0x30, 0xc4, 0x67, 0x6a, 0x73, 0x87, 0x14, 0xb4, 0xa3, 0x9e, 0x53, 0x91, 0xe4, 0xb2, 0xe3, - 0x89, 0xe1, 0xb3, 0xe3, 0x69, 0x80, 0x0b, 0xaa, 0xe3, 0xa9, 0x2a, 0xc8, 0x28, 0xe1, 0xc3, 0x6b, - 0x00, 0xd8, 0x38, 0x24, 0x98, 0x65, 0x24, 0xdb, 0x1c, 0xe6, 0xd2, 0x6a, 0x7d, 0x96, 0x15, 0x07, - 0x69, 0x52, 0xf0, 0x06, 0x80, 0xc9, 0x3f, 0x27, 0xf0, 0x5f, 0xb5, 0x88, 0xef, 0xf8, 0x0d, 0xb3, - 0xc0, 0xcd, 
0x9e, 0x66, 0xa7, 0xed, 0x72, 0x07, 0x17, 0x75, 0x19, 0x01, 0x77, 0xc1, 0x90, 0xb8, - 0x46, 0xf3, 0xbc, 0xd1, 0xc7, 0x1d, 0x77, 0xcf, 0x72, 0x1d, 0x9b, 0x43, 0x55, 0x00, 0x77, 0x0f, - 0x47, 0x41, 0x12, 0x0d, 0xbe, 0x67, 0x80, 0x31, 0x1a, 0x6f, 0x11, 0x29, 0x4d, 0x79, 0x56, 0x1f, - 0xbd, 0x76, 0xa7, 0x5f, 0xf0, 0x55, 0x4d, 0x77, 0x65, 0xa2, 0xd5, 0x2c, 0x8e, 0xe9, 0x14, 0xd4, - 0x86, 0x0d, 0x7f, 0x6f, 0x00, 0xd3, 0xb2, 0x45, 0xe8, 0x5b, 0xee, 0x06, 0x71, 0xfc, 0x08, 0x13, - 0x71, 0x21, 0x12, 0xc7, 0x47, 0x1f, 0x6b, 0xc5, 0xec, 0x3d, 0xab, 0x32, 0x27, 0x57, 0xda, 0x5c, - 0xec, 0x61, 0x01, 0xea, 0x69, 0x5b, 0xe9, 0xdf, 0x46, 0x36, 0xb5, 0x68, 0xb3, 0xac, 0xd6, 0x2c, - 0x17, 0xc3, 0x65, 0x30, 0xc1, 0xaa, 0x5f, 0x84, 0x43, 0xd7, 0xa9, 0x59, 0x94, 0xdf, 0x7e, 0x44, - 0x74, 0xab, 0x6b, 0x78, 0x35, 0xc3, 0x47, 0x1d, 0x23, 0xe0, 0x2b, 0x00, 0x8a, 0xb2, 0xb0, 0x4d, - 0x8f, 0xa8, 0x04, 0x54, 0x81, 0x57, 0xed, 0x90, 0x40, 0x5d, 0x46, 0xc1, 0x25, 0x30, 0xe9, 0x5a, - 0x5b, 0xd8, 0xad, 0x62, 0x17, 0xd7, 0xa2, 0x80, 0x70, 0x55, 0xe2, 0x7e, 0x38, 0xd5, 0x6a, 0x16, - 0x27, 0x6f, 0x65, 0x99, 0xa8, 0x53, 0xbe, 0x74, 0x29, 0xbb, 0x97, 0xf5, 0x89, 0x8b, 0x62, 0xfb, - 0x83, 0x1c, 0x98, 0xe9, 0x1d, 0x14, 0xf0, 0x3b, 0xaa, 0x34, 0x16, 0x15, 0xdf, 0x6b, 0x27, 0x10, - 0x7a, 0xf2, 0x3a, 0x00, 0x3a, 0xaf, 0x02, 0xf0, 0x80, 0x9d, 0xd7, 0x96, 0x9b, 0x5c, 0xfb, 0x37, - 0x4f, 0x02, 0x9d, 0xe9, 0xaf, 0x8c, 0x88, 0x2a, 0xc0, 0x72, 0xf9, 0xa1, 0x6f, 0xb9, 0xb8, 0xf4, - 0x61, 0xc7, 0xd5, 0x36, 0xdd, 0xac, 0xf0, 0x07, 0x06, 0x18, 0x0f, 0x42, 0xec, 0x2f, 0x6e, 0xac, - 0xde, 0xfb, 0xa2, 0xd8, 0xb4, 0xd2, 0x41, 0xab, 0x47, 0x37, 0x91, 0xdd, 0xaf, 0x85, 0xae, 0x0d, - 0x12, 0x84, 0xb4, 0x72, 0xb6, 0xd5, 0x2c, 0x8e, 0xaf, 0xb7, 0xa3, 0xa0, 0x2c, 0x6c, 0xc9, 0x03, - 0x53, 0x2b, 0xfb, 0x11, 0x26, 0xbe, 0xe5, 0x2e, 0x07, 0xb5, 0xd8, 0xc3, 0x7e, 0x24, 0x6c, 0xcc, - 0xb4, 0x0b, 0x8c, 0x47, 0x6c, 0x17, 0x5c, 0x04, 0x03, 0x31, 0x71, 0x65, 0xd4, 0x8e, 0xaa, 0x26, - 0x18, 0xba, 0x85, 0x18, 0xbd, 0x74, 0x09, 0x0c, 0x32, 0x3b, 0xe1, 0x79, 0x30, 0x40, 0xac, 0x3d, - 0xae, 0x75, 0xac, 0x32, 0xcc, 0x44, 0x90, 0xb5, 0x87, 0x18, 0xad, 0xf4, 0xf7, 0x39, 0x30, 0x9e, - 0x99, 0x0b, 0x9c, 0x01, 0x39, 0xd5, 0x59, 0x03, 0x52, 0x69, 0x6e, 0x75, 0x19, 0xe5, 0x1c, 0x1b, - 0xbe, 0xa0, 0xb2, 0xab, 0x00, 0x2d, 0xaa, 0xc3, 0x82, 0x53, 0x59, 0x59, 0x96, 0xaa, 0x63, 0x86, - 0x24, 0xe9, 0x91, 0xd9, 0x80, 0xeb, 0x72, 0x57, 0x08, 0x1b, 0x70, 0x1d, 0x31, 0xda, 0x51, 0x7b, - 0x25, 0x49, 0xb3, 0x26, 0xff, 0x08, 0xcd, 0x9a, 0xa1, 0x07, 0x36, 0x6b, 0x2e, 0x83, 0x7c, 0xe4, - 0x44, 0x2e, 0xe6, 0x27, 0x95, 0x56, 0x0c, 0xdf, 0x61, 0x44, 0x24, 0x78, 0x10, 0x83, 0x61, 0x1b, - 0xd7, 0xad, 0xd8, 0x8d, 0xf8, 0xa1, 0x34, 0x7a, 0xed, 0xeb, 0xc7, 0x8b, 0x1e, 0xd1, 0xcc, 0x58, - 0x16, 0x2a, 0x51, 0xa2, 0x1b, 0x3e, 0x0e, 0x86, 0x3d, 0x6b, 0xdf, 0xf1, 0x62, 0x8f, 0x57, 0x8c, - 0x86, 0x10, 0x5b, 0x13, 0x24, 0x94, 0xf0, 0x58, 0x12, 0xc4, 0xfb, 0x35, 0x37, 0xa6, 0xce, 0x2e, - 0x96, 0x4c, 0x59, 0xd2, 0xa9, 0x24, 0xb8, 0x92, 0xe1, 0xa3, 0x8e, 0x11, 0x1c, 0xcc, 0xf1, 0xf9, - 0xe0, 0x51, 0x0d, 0x4c, 0x90, 0x50, 0xc2, 0x6b, 0x07, 0x93, 0xf2, 0x63, 0xbd, 0xc0, 0xe4, 0xe0, - 0x8e, 0x11, 0xf0, 0x29, 0x30, 0xe2, 0x59, 0xfb, 0xb7, 0xb0, 0xdf, 0x88, 0xb6, 0xcd, 0xd3, 0x73, - 0xc6, 0xfc, 0x40, 0xe5, 0x74, 0xab, 0x59, 0x1c, 0x59, 0x4b, 0x88, 0x28, 0xe5, 0x73, 0x61, 0xc7, - 0x97, 0xc2, 0x67, 0x34, 0xe1, 0x84, 0x88, 0x52, 0x3e, 0xab, 0x4c, 0x42, 0x2b, 0x62, 0xfb, 0xca, - 0x1c, 0x6f, 0xbf, 0x38, 0x6f, 0x08, 0x32, 0x4a, 0xf8, 0x70, 0x1e, 0x14, 0x3c, 0x6b, 0x9f, 0xdf, - 0x29, 0xcd, 0x09, 0xae, 0x96, 0x37, 
0x14, 0xd7, 0x24, 0x0d, 0x29, 0x2e, 0x97, 0x74, 0x7c, 0x21, - 0x39, 0xa9, 0x49, 0x4a, 0x1a, 0x52, 0x5c, 0x16, 0xbf, 0xb1, 0xef, 0xdc, 0x8f, 0xb1, 0x10, 0x86, - 0xdc, 0x33, 0x2a, 0x7e, 0xef, 0xa6, 0x2c, 0xa4, 0xcb, 0xb1, 0x3b, 0x9d, 0x17, 0xbb, 0x91, 0x13, - 0xba, 0x78, 0xbd, 0x6e, 0x9e, 0xe5, 0xfe, 0xe7, 0xa5, 0xfc, 0x9a, 0xa2, 0x22, 0x4d, 0x02, 0xbe, - 0x05, 0x06, 0xb1, 0x1f, 0x7b, 0xe6, 0x39, 0x7e, 0x7c, 0x1f, 0x37, 0xfa, 0xd4, 0x7e, 0x59, 0xf1, - 0x63, 0x0f, 0x71, 0xcd, 0xf0, 0x05, 0x70, 0xda, 0xb3, 0xf6, 0x59, 0x12, 0xc0, 0x24, 0x62, 0x17, - 0xcd, 0x29, 0x3e, 0xef, 0x49, 0x56, 0xc4, 0xae, 0xe9, 0x0c, 0xd4, 0x2e, 0xc7, 0x07, 0x3a, 0xbe, - 0x36, 0x70, 0x5a, 0x1b, 0xa8, 0x33, 0x50, 0xbb, 0x1c, 0x73, 0x32, 0xc1, 0xf7, 0x63, 0x87, 0x60, - 0xdb, 0xfc, 0x3f, 0x5e, 0xf7, 0xca, 0xfe, 0xae, 0xa0, 0x21, 0xc5, 0x85, 0xf7, 0x93, 0x96, 0x83, - 0xc9, 0x37, 0xdf, 0x46, 0xdf, 0x52, 0xf7, 0x3a, 0x59, 0x24, 0xc4, 0x3a, 0x10, 0xa7, 0x8a, 0xde, - 0x6c, 0x80, 0x3e, 0xc8, 0x5b, 0xae, 0xbb, 0x5e, 0x37, 0xcf, 0x73, 0x8f, 0xf7, 0xf1, 0xb4, 0x50, - 0x19, 0x66, 0x91, 0xe9, 0x47, 0x02, 0x86, 0xe1, 0x05, 0x3e, 0x8b, 0x85, 0x99, 0x13, 0xc3, 0x5b, - 0x67, 0xfa, 0x91, 0x80, 0xe1, 0xf3, 0xf3, 0x0f, 0xd6, 0xeb, 0xe6, 0x63, 0x27, 0x37, 0x3f, 0xa6, - 0x1f, 0x09, 0x18, 0x68, 0x83, 0x01, 0x3f, 0x88, 0xcc, 0x0b, 0xfd, 0x3e, 0x7b, 0xf9, 0x69, 0x72, - 0x3b, 0x88, 0x10, 0x53, 0x0f, 0x7f, 0x64, 0x00, 0x10, 0xa6, 0x91, 0x78, 0xf1, 0xb8, 0x2d, 0x80, - 0x0c, 0x5a, 0x39, 0x8d, 0xde, 0x15, 0x3f, 0x22, 0x07, 0xe9, 0xbd, 0x46, 0x8b, 0x72, 0xcd, 0x00, - 0xf8, 0x73, 0x03, 0x9c, 0xd3, 0xcb, 0x5d, 0x65, 0xd9, 0x2c, 0xf7, 0xc3, 0x7a, 0x1f, 0x03, 0xb9, - 0x12, 0x04, 0x6e, 0xc5, 0x6c, 0x35, 0x8b, 0xe7, 0x16, 0xbb, 0x00, 0xa2, 0xae, 0x66, 0xc0, 0x5f, - 0x1b, 0x60, 0x52, 0x66, 0x47, 0xcd, 0xb8, 0x22, 0x77, 0xdb, 0x5b, 0x7d, 0x74, 0x5b, 0x16, 0x42, - 0x78, 0x4f, 0x7d, 0x65, 0xec, 0xe0, 0xa3, 0x4e, 0xab, 0xe0, 0xef, 0x0c, 0x30, 0x66, 0xe3, 0x10, - 0xfb, 0x36, 0xf6, 0x6b, 0xcc, 0xcc, 0xb9, 0xe3, 0xf6, 0x15, 0xb2, 0x66, 0x2e, 0x6b, 0xda, 0x85, - 0x85, 0x65, 0x69, 0xe1, 0x98, 0xce, 0x3a, 0x6c, 0x16, 0xa7, 0xd3, 0xa1, 0x3a, 0x07, 0xb5, 0x19, - 0x08, 0x7f, 0x6c, 0x80, 0xf1, 0xd4, 0xed, 0xe2, 0x80, 0xb8, 0x74, 0x32, 0x0b, 0xcf, 0x4b, 0xd0, - 0xc5, 0x76, 0x2c, 0x94, 0x05, 0x87, 0xbf, 0x31, 0x58, 0xb5, 0x95, 0xdc, 0xd5, 0xa8, 0x59, 0xe2, - 0x1e, 0x7c, 0xbd, 0x9f, 0x1e, 0x54, 0xca, 0x85, 0x03, 0xaf, 0xa6, 0x95, 0x9c, 0xe2, 0x1c, 0x36, - 0x8b, 0x53, 0xba, 0xff, 0x14, 0x03, 0xe9, 0xc6, 0xc1, 0x77, 0x0d, 0x30, 0x86, 0xd3, 0x82, 0x99, - 0x9a, 0x97, 0x8f, 0xeb, 0xba, 0xae, 0xe5, 0xb7, 0xb8, 0x4e, 0x6b, 0x2c, 0x8a, 0xda, 0x60, 0x59, - 0xed, 0x87, 0xf7, 0x2d, 0x2f, 0x74, 0xb1, 0xf9, 0xff, 0xfd, 0xab, 0xfd, 0x56, 0x84, 0x4a, 0x94, - 0xe8, 0x86, 0x57, 0x41, 0xc1, 0x8f, 0x5d, 0xd7, 0xda, 0x72, 0xb1, 0xf9, 0x38, 0xaf, 0x22, 0x54, - 0x7f, 0xf1, 0xb6, 0xa4, 0x23, 0x25, 0x01, 0xeb, 0x60, 0x6e, 0xff, 0xa6, 0x7a, 0x7c, 0xd1, 0xb5, - 0x81, 0x67, 0x5e, 0xe1, 0x5a, 0x66, 0x5a, 0xcd, 0xe2, 0xf4, 0x66, 0xf7, 0x16, 0xdf, 0x43, 0x75, - 0xc0, 0x37, 0xc0, 0x63, 0x9a, 0xcc, 0x8a, 0xb7, 0x85, 0x6d, 0x1b, 0xdb, 0xc9, 0x45, 0xcb, 0xfc, - 0x02, 0x87, 0x50, 0xfb, 0x78, 0x33, 0x2b, 0x80, 0x1e, 0x34, 0x1a, 0xde, 0x02, 0xd3, 0x1a, 0x7b, - 0xd5, 0x8f, 0xd6, 0x49, 0x35, 0x22, 0x8e, 0xdf, 0x30, 0xe7, 0xb9, 0xde, 0x73, 0xc9, 0xee, 0xdb, - 0xd4, 0x78, 0xa8, 0xc7, 0x18, 0xf8, 0x72, 0x9b, 0x36, 0xfe, 0xe1, 0xc2, 0x0a, 0x6f, 0xe2, 0x03, - 0x6a, 0x3e, 0xc1, 0x8b, 0x0b, 0xbe, 0xce, 0x9b, 0x1a, 0x1d, 0xf5, 0x90, 0x87, 0xdf, 0x00, 0x67, - 0x33, 0x1c, 0x76, 0xaf, 0x30, 0x9f, 0x14, 0x17, 0x04, 0x56, 
0x89, 0x6e, 0x26, 0x44, 0xd4, 0x4d, - 0x12, 0x7e, 0x15, 0x40, 0x8d, 0xbc, 0x66, 0x85, 0x7c, 0xfc, 0x53, 0xe2, 0xae, 0xc2, 0x56, 0x74, - 0x53, 0xd2, 0x50, 0x17, 0x39, 0xf8, 0x81, 0xd1, 0x36, 0x93, 0xf4, 0x36, 0x4b, 0xcd, 0xab, 0x7c, - 0xc3, 0xbe, 0x7c, 0xf4, 0x00, 0x4c, 0x95, 0xa1, 0xd8, 0xc5, 0x9a, 0x87, 0x35, 0x14, 0xd4, 0x03, - 0x7d, 0x86, 0x5d, 0xa6, 0x33, 0x39, 0x1c, 0x4e, 0x80, 0x81, 0x1d, 0x2c, 0x3f, 0x1b, 0x23, 0xf6, - 0x13, 0xbe, 0x09, 0xf2, 0xbb, 0x96, 0x1b, 0x27, 0xad, 0x80, 0xfe, 0x9d, 0xf5, 0x48, 0xe8, 0x7d, - 0x31, 0x77, 0xdd, 0x98, 0x79, 0xdf, 0x00, 0xd3, 0xdd, 0x4f, 0x95, 0xcf, 0xcb, 0xa2, 0x9f, 0x19, - 0x60, 0xb2, 0xe3, 0x00, 0xe9, 0x62, 0x8c, 0xdb, 0x6e, 0xcc, 0xbd, 0x3e, 0x9e, 0x04, 0x62, 0x23, - 0xf0, 0x8a, 0x56, 0xb7, 0xec, 0x87, 0x06, 0x98, 0xc8, 0x26, 0xe6, 0xcf, 0xc9, 0x4b, 0xa5, 0xf7, - 0x72, 0x60, 0xba, 0x7b, 0x0d, 0x0e, 0x3d, 0xd5, 0x5d, 0xe8, 0x7b, 0x83, 0xa6, 0x5b, 0xcb, 0xf6, - 0x1d, 0x03, 0x8c, 0xbe, 0xad, 0xe4, 0x92, 0xaf, 0x99, 0xfd, 0xec, 0x0a, 0x25, 0x47, 0x5f, 0xca, - 0xa0, 0x48, 0x87, 0x2c, 0xfd, 0xd6, 0x00, 0x53, 0x5d, 0x8f, 0x73, 0x78, 0x05, 0x0c, 0x59, 0xae, - 0x1b, 0xec, 0x89, 0x6e, 0x9e, 0xd6, 0x96, 0x5f, 0xe4, 0x54, 0x24, 0xb9, 0x9a, 0xcf, 0x72, 0x9f, - 0x81, 0xcf, 0x4a, 0x7f, 0x30, 0xc0, 0x85, 0x07, 0x45, 0xdd, 0x67, 0xbd, 0x86, 0xf3, 0xa0, 0x20, - 0x8b, 0xed, 0x03, 0xbe, 0x7e, 0x32, 0xbb, 0xca, 0x8c, 0xc0, 0x5f, 0xcb, 0x88, 0x5f, 0xa5, 0x5f, - 0x1a, 0x60, 0xa2, 0x8a, 0xc9, 0xae, 0x53, 0xc3, 0x08, 0xd7, 0x31, 0xc1, 0x7e, 0x0d, 0xc3, 0x05, - 0x30, 0xc2, 0xbf, 0x36, 0x86, 0x56, 0x2d, 0xf9, 0x46, 0x32, 0x29, 0x1d, 0x3d, 0x72, 0x3b, 0x61, - 0xa0, 0x54, 0x46, 0x7d, 0x4f, 0xc9, 0xf5, 0xfc, 0x9e, 0x72, 0x01, 0x0c, 0x86, 0x69, 0x03, 0xb8, - 0xc0, 0xb8, 0xbc, 0xe7, 0xcb, 0xa9, 0x9c, 0x1b, 0x90, 0x88, 0x77, 0xb9, 0xf2, 0x92, 0x1b, 0x90, - 0x08, 0x71, 0x6a, 0xe9, 0x4f, 0x39, 0x70, 0xa6, 0x3d, 0x3f, 0x33, 0x40, 0x12, 0xbb, 0x1d, 0x1f, - 0x70, 0x18, 0x0f, 0x71, 0x8e, 0xfe, 0x6e, 0x20, 0xf7, 0xe0, 0x77, 0x03, 0xf0, 0x25, 0x30, 0x29, - 0x7f, 0xae, 0xec, 0x87, 0x04, 0x53, 0xfe, 0x65, 0x72, 0xa0, 0xfd, 0xbd, 0xdf, 0x5a, 0x56, 0x00, - 0x75, 0x8e, 0x81, 0x5f, 0xc9, 0xbc, 0x69, 0xb8, 0x9c, 0xbe, 0x67, 0x60, 0xb5, 0x1d, 0x2f, 0x1d, - 0xee, 0xb1, 0x2d, 0xbf, 0x42, 0x48, 0x40, 0x32, 0x0f, 0x1d, 0x16, 0xc0, 0x48, 0x9d, 0x09, 0xf0, - 0x3e, 0x79, 0xbe, 0xdd, 0xe9, 0x37, 0x12, 0x06, 0x4a, 0x65, 0xe0, 0xd7, 0xc0, 0x78, 0x10, 0x8a, - 0x2a, 0x76, 0xdd, 0xb5, 0xab, 0xd8, 0xad, 0xf3, 0x8e, 0x5e, 0x21, 0x69, 0xbb, 0xb6, 0xb1, 0x50, - 0x56, 0xb6, 0xf4, 0x47, 0x03, 0x9c, 0x4d, 0x1e, 0x13, 0xb9, 0x0e, 0xf6, 0xa3, 0xa5, 0xc0, 0xaf, - 0x3b, 0x0d, 0x78, 0x5e, 0xb4, 0x4f, 0xb5, 0x9e, 0x64, 0xd2, 0x3a, 0x85, 0xf7, 0xc1, 0x30, 0x15, - 0xb1, 0x22, 0xc3, 0xf8, 0x95, 0xa3, 0x87, 0x71, 0x36, 0xe8, 0x44, 0xf5, 0x97, 0x50, 0x13, 0x1c, - 0x16, 0xc9, 0x35, 0xab, 0x12, 0xfb, 0xb6, 0x6c, 0xa1, 0x8f, 0x89, 0x48, 0x5e, 0x5a, 0x14, 0x34, - 0xa4, 0xb8, 0xa5, 0x7f, 0x18, 0x60, 0xb2, 0xe3, 0x71, 0x14, 0xfc, 0x9e, 0x01, 0xc6, 0x6a, 0xda, - 0xf4, 0x64, 0x3e, 0x58, 0x3b, 0xfe, 0x03, 0x2c, 0x4d, 0xa9, 0x28, 0xa1, 0x74, 0x0a, 0x6a, 0x03, - 0x85, 0x9b, 0xc0, 0xac, 0x65, 0xde, 0x21, 0x66, 0xbe, 0x6c, 0x5e, 0x68, 0x35, 0x8b, 0xe6, 0x52, - 0x0f, 0x19, 0xd4, 0x73, 0x74, 0xe5, 0x5b, 0x1f, 0x7d, 0x3a, 0x7b, 0xea, 0xe3, 0x4f, 0x67, 0x4f, - 0x7d, 0xf2, 0xe9, 0xec, 0xa9, 0x77, 0x5a, 0xb3, 0xc6, 0x47, 0xad, 0x59, 0xe3, 0xe3, 0xd6, 0xac, - 0xf1, 0x49, 0x6b, 0xd6, 0xf8, 0x6b, 0x6b, 0xd6, 0xf8, 0xc9, 0xdf, 0x66, 0x4f, 0xbd, 0x7e, 0xfd, - 0xa8, 0xaf, 0x8f, 0xff, 0x17, 0x00, 0x00, 0xff, 0xff, 0x28, 0x77, 0xf5, 0x22, 0xd1, 
0x2c, 0x00, - 0x00, + proto.RegisterFile("k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto", fileDescriptor_c5e101a0235c8c62) +} + +var fileDescriptor_c5e101a0235c8c62 = []byte{ + // 3166 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x5a, 0xdb, 0x6f, 0x1b, 0xc7, + 0xd5, 0xf7, 0x52, 0x37, 0x6a, 0x24, 0x59, 0xd2, 0xd8, 0xd2, 0xb7, 0x56, 0x6c, 0x51, 0xa6, 0xbf, + 0xf8, 0x53, 0x12, 0x87, 0x4a, 0xf4, 0x25, 0x8d, 0x9b, 0x5e, 0x02, 0x51, 0x92, 0x13, 0xc5, 0x92, + 0x25, 0x0c, 0x6d, 0x47, 0x49, 0x8a, 0x26, 0x2b, 0xee, 0x90, 0xda, 0x68, 0xb9, 0xbb, 0x9e, 0xd9, + 0xd5, 0x05, 0x68, 0x81, 0xa0, 0x45, 0xd0, 0x36, 0x40, 0x9b, 0x3e, 0x14, 0xe9, 0x53, 0x51, 0x14, + 0x45, 0x1e, 0xda, 0x87, 0xf6, 0xad, 0xfd, 0x17, 0xf2, 0x52, 0x20, 0x40, 0x81, 0x22, 0x40, 0x01, + 0xa2, 0x61, 0xff, 0x81, 0x02, 0x6d, 0x51, 0x54, 0x0f, 0x45, 0x31, 0x97, 0x9d, 0x9d, 0x5d, 0x92, + 0xb6, 0x61, 0x51, 0xc9, 0x1b, 0x79, 0xce, 0x99, 0xf3, 0x3b, 0x73, 0xe6, 0xcc, 0x99, 0x33, 0x67, + 0x07, 0xbc, 0xb2, 0x77, 0x9d, 0x96, 0x1c, 0x7f, 0xc1, 0x0a, 0x1c, 0x7c, 0x18, 0x62, 0x8f, 0x3a, + 0xbe, 0x47, 0x9f, 0xb6, 0x02, 0x87, 0x62, 0xb2, 0x8f, 0xc9, 0x42, 0xb0, 0x57, 0x67, 0x3c, 0x9a, + 0x16, 0x58, 0xd8, 0x7f, 0x76, 0xa1, 0x8e, 0x3d, 0x4c, 0xac, 0x10, 0xdb, 0xa5, 0x80, 0xf8, 0xa1, + 0x0f, 0xaf, 0x0b, 0x4d, 0xa5, 0x94, 0xe0, 0x5b, 0x4a, 0x53, 0x29, 0xd8, 0xab, 0x33, 0x1e, 0x4d, + 0x0b, 0x94, 0xf6, 0x9f, 0x9d, 0x79, 0xba, 0xee, 0x84, 0xbb, 0xd1, 0x4e, 0xa9, 0xea, 0x37, 0x16, + 0xea, 0x7e, 0xdd, 0x5f, 0xe0, 0x0a, 0x77, 0xa2, 0x1a, 0xff, 0xc7, 0xff, 0xf0, 0x5f, 0x02, 0x68, + 0xe6, 0xb9, 0xc4, 0xe4, 0x86, 0x55, 0xdd, 0x75, 0x3c, 0x4c, 0x8e, 0x12, 0x3b, 0x1b, 0x38, 0xb4, + 0x3a, 0x98, 0x37, 0xb3, 0xd0, 0x6d, 0x14, 0x89, 0xbc, 0xd0, 0x69, 0xe0, 0xb6, 0x01, 0x5f, 0x7a, + 0xd0, 0x00, 0x5a, 0xdd, 0xc5, 0x0d, 0x2b, 0x3b, 0xae, 0x78, 0x6c, 0x80, 0xc9, 0x65, 0xdf, 0xdb, + 0xc7, 0x84, 0x4d, 0x10, 0xe1, 0x7b, 0x11, 0xa6, 0x21, 0x2c, 0x83, 0xbe, 0xc8, 0xb1, 0x4d, 0x63, + 0xce, 0x98, 0x1f, 0x2e, 0x3f, 0xf3, 0x71, 0xb3, 0x70, 0xa6, 0xd5, 0x2c, 0xf4, 0xdd, 0x59, 0x5b, + 0x39, 0x6e, 0x16, 0x2e, 0x77, 0x43, 0x0a, 0x8f, 0x02, 0x4c, 0x4b, 0x77, 0xd6, 0x56, 0x10, 0x1b, + 0x0c, 0x5f, 0x06, 0x93, 0x36, 0xa6, 0x0e, 0xc1, 0xf6, 0xd2, 0xd6, 0xda, 0x5d, 0xa1, 0xdf, 0xcc, + 0x71, 0x8d, 0x17, 0xa4, 0xc6, 0xc9, 0x95, 0xac, 0x00, 0x6a, 0x1f, 0x03, 0xb7, 0xc1, 0x90, 0xbf, + 0xf3, 0x0e, 0xae, 0x86, 0xd4, 0xec, 0x9b, 0xeb, 0x9b, 0x1f, 0x59, 0x7c, 0xba, 0x94, 0x2c, 0x9e, + 0x32, 0x81, 0xaf, 0x98, 0x9c, 0x6c, 0x09, 0x59, 0x07, 0xab, 0xf1, 0xa2, 0x95, 0xc7, 0x25, 0xda, + 0xd0, 0xa6, 0xd0, 0x82, 0x62, 0x75, 0xc5, 0x5f, 0xe6, 0x00, 0xd4, 0x27, 0x4f, 0x03, 0xdf, 0xa3, + 0xb8, 0x27, 0xb3, 0xa7, 0x60, 0xa2, 0xca, 0x35, 0x87, 0xd8, 0x96, 0xb8, 0x66, 0xee, 0x51, 0xac, + 0x37, 0x25, 0xfe, 0xc4, 0x72, 0x46, 0x1d, 0x6a, 0x03, 0x80, 0xb7, 0xc1, 0x20, 0xc1, 0x34, 0x72, + 0x43, 0xb3, 0x6f, 0xce, 0x98, 0x1f, 0x59, 0xbc, 0xd6, 0x15, 0x8a, 0x87, 0x36, 0x0b, 0xbe, 0xd2, + 0xfe, 0xb3, 0xa5, 0x4a, 0x68, 0x85, 0x11, 0x2d, 0x9f, 0x95, 0x48, 0x83, 0x88, 0xeb, 0x40, 0x52, + 0x57, 0xf1, 0x3f, 0x06, 0x98, 0xd0, 0xbd, 0xb4, 0xef, 0xe0, 0x03, 0x48, 0xc0, 0x10, 0x11, 0xc1, + 0xc2, 0xfd, 0x34, 0xb2, 0x78, 0xb3, 0xf4, 0xa8, 0x3b, 0xaa, 0xd4, 0x16, 0x7f, 0xe5, 0x11, 0xb6, + 0x5c, 0xf2, 0x0f, 0x8a, 0x81, 0xe0, 0x3e, 0xc8, 0x13, 0xb9, 0x46, 0x3c, 0x90, 0x46, 0x16, 0xd7, + 0x7b, 0x03, 0x2a, 0x74, 0x96, 0x47, 0x5b, 0xcd, 0x42, 0x3e, 0xfe, 0x87, 0x14, 0x56, 0xf1, 0xe7, + 0x39, 0x30, 0xbb, 0x1c, 0xd1, 0xd0, 0x6f, 0x20, 0x4c, 0xfd, 0x88, 
0x54, 0xf1, 0xb2, 0xef, 0x46, + 0x0d, 0x6f, 0x05, 0xd7, 0x1c, 0xcf, 0x09, 0x59, 0x8c, 0xce, 0x81, 0x7e, 0xcf, 0x6a, 0x60, 0x19, + 0x33, 0xa3, 0xd2, 0x93, 0xfd, 0xb7, 0xac, 0x06, 0x46, 0x9c, 0xc3, 0x24, 0x58, 0x88, 0xc8, 0x1d, + 0xa0, 0x24, 0x6e, 0x1f, 0x05, 0x18, 0x71, 0x0e, 0xbc, 0x0a, 0x06, 0x6b, 0x3e, 0x69, 0x58, 0x62, + 0xf5, 0x86, 0x93, 0xf5, 0xb8, 0xc1, 0xa9, 0x48, 0x72, 0xe1, 0xf3, 0x60, 0xc4, 0xc6, 0xb4, 0x4a, + 0x9c, 0x80, 0x41, 0x9b, 0xfd, 0x5c, 0xf8, 0x9c, 0x14, 0x1e, 0x59, 0x49, 0x58, 0x48, 0x97, 0x83, + 0xd7, 0x40, 0x3e, 0x20, 0x8e, 0x4f, 0x9c, 0xf0, 0xc8, 0x1c, 0x98, 0x33, 0xe6, 0x07, 0xca, 0x13, + 0x72, 0x4c, 0x7e, 0x4b, 0xd2, 0x91, 0x92, 0x60, 0xd2, 0xef, 0x50, 0xdf, 0xdb, 0xb2, 0xc2, 0x5d, + 0x73, 0x90, 0x23, 0x28, 0xe9, 0x57, 0x2b, 0x9b, 0xb7, 0x18, 0x1d, 0x29, 0x89, 0xe2, 0x9f, 0x0c, + 0x60, 0x66, 0x3d, 0x14, 0xbb, 0x17, 0xde, 0x00, 0x79, 0x1a, 0xb2, 0x9c, 0x53, 0x3f, 0x92, 0xfe, + 0x79, 0x32, 0x56, 0x55, 0x91, 0xf4, 0xe3, 0x66, 0x61, 0x3a, 0x19, 0x11, 0x53, 0xb9, 0x6f, 0xd4, + 0x58, 0x16, 0x72, 0x07, 0x78, 0x67, 0xd7, 0xf7, 0xf7, 0xe4, 0xea, 0x9f, 0x20, 0xe4, 0x5e, 0x13, + 0x8a, 0x12, 0x4c, 0x11, 0x72, 0x92, 0x8c, 0x62, 0xa0, 0xe2, 0xbf, 0x73, 0xd9, 0x89, 0x69, 0x8b, + 0xfe, 0x36, 0xc8, 0xb3, 0x2d, 0x64, 0x5b, 0xa1, 0x25, 0x37, 0xc1, 0x33, 0x0f, 0xb7, 0xe1, 0xc4, + 0x7e, 0xdd, 0xc0, 0xa1, 0x55, 0x86, 0xd2, 0x15, 0x20, 0xa1, 0x21, 0xa5, 0x15, 0x1e, 0x82, 0x7e, + 0x1a, 0xe0, 0xaa, 0x9c, 0xef, 0xdd, 0x13, 0x44, 0x7b, 0x97, 0x39, 0x54, 0x02, 0x5c, 0x4d, 0x82, + 0x91, 0xfd, 0x43, 0x1c, 0x11, 0xbe, 0x6b, 0x80, 0x41, 0xca, 0xf3, 0x82, 0xcc, 0x25, 0xdb, 0xa7, + 0x00, 0x9e, 0xc9, 0x3b, 0xe2, 0x3f, 0x92, 0xb8, 0xc5, 0x7f, 0xe4, 0xc0, 0xe5, 0x6e, 0x43, 0x97, + 0x7d, 0xcf, 0x16, 0x8b, 0xb0, 0x26, 0xf7, 0x95, 0x88, 0xac, 0xe7, 0xf5, 0x7d, 0x75, 0xdc, 0x2c, + 0x3c, 0xfe, 0x40, 0x05, 0xda, 0x06, 0xfc, 0xb2, 0x9a, 0xb2, 0xd8, 0xa4, 0x97, 0xd3, 0x86, 0x1d, + 0x37, 0x0b, 0xe3, 0x6a, 0x58, 0xda, 0x56, 0xb8, 0x0f, 0xa0, 0x6b, 0xd1, 0xf0, 0x36, 0xb1, 0x3c, + 0x2a, 0xd4, 0x3a, 0x0d, 0x2c, 0x3d, 0xf7, 0xe4, 0xc3, 0x05, 0x05, 0x1b, 0x51, 0x9e, 0x91, 0x90, + 0x70, 0xbd, 0x4d, 0x1b, 0xea, 0x80, 0xc0, 0x72, 0x06, 0xc1, 0x16, 0x55, 0x69, 0x40, 0xcb, 0xe1, + 0x8c, 0x8a, 0x24, 0x17, 0x3e, 0x01, 0x86, 0x1a, 0x98, 0x52, 0xab, 0x8e, 0xf9, 0xde, 0x1f, 0x4e, + 0x0e, 0xc5, 0x0d, 0x41, 0x46, 0x31, 0xbf, 0xf8, 0x4f, 0x03, 0x5c, 0xec, 0xe6, 0xb5, 0x75, 0x87, + 0x86, 0xf0, 0x1b, 0x6d, 0x61, 0x5f, 0x7a, 0xb8, 0x19, 0xb2, 0xd1, 0x3c, 0xe8, 0x55, 0x2a, 0x89, + 0x29, 0x5a, 0xc8, 0x1f, 0x80, 0x01, 0x27, 0xc4, 0x8d, 0xf8, 0xb4, 0x44, 0xbd, 0x0f, 0xbb, 0xf2, + 0x98, 0x84, 0x1f, 0x58, 0x63, 0x40, 0x48, 0xe0, 0x15, 0x3f, 0xca, 0x81, 0x4b, 0xdd, 0x86, 0xb0, + 0x3c, 0x4e, 0x99, 0xb3, 0x03, 0x37, 0x22, 0x96, 0x2b, 0x83, 0x4d, 0x39, 0x7b, 0x8b, 0x53, 0x91, + 0xe4, 0xb2, 0xdc, 0x49, 0x1d, 0xaf, 0x1e, 0xb9, 0x16, 0x91, 0x91, 0xa4, 0x26, 0x5c, 0x91, 0x74, + 0xa4, 0x24, 0x60, 0x09, 0x00, 0xba, 0xeb, 0x93, 0x90, 0x63, 0xf0, 0x0a, 0x67, 0xb8, 0x7c, 0x96, + 0x65, 0x84, 0x8a, 0xa2, 0x22, 0x4d, 0x82, 0x1d, 0x24, 0x7b, 0x8e, 0x67, 0xcb, 0x05, 0x57, 0x7b, + 0xf7, 0xa6, 0xe3, 0xd9, 0x88, 0x73, 0x18, 0xbe, 0xeb, 0xd0, 0x90, 0x51, 0xe4, 0x6a, 0xa7, 0x1c, + 0xce, 0x25, 0x95, 0x04, 0xc3, 0xaf, 0xb2, 0x04, 0xeb, 0x13, 0x07, 0x53, 0x73, 0x30, 0xc1, 0x5f, + 0x56, 0x54, 0xa4, 0x49, 0x14, 0xff, 0xdc, 0xdf, 0x3d, 0x3e, 0x58, 0x02, 0x81, 0x57, 0xc0, 0x40, + 0x9d, 0xf8, 0x51, 0x20, 0xbd, 0xa4, 0xbc, 0xfd, 0x32, 0x23, 0x22, 0xc1, 0x83, 0xdf, 0x02, 0x03, + 0x9e, 0x9c, 0x30, 0x8b, 0xa0, 0xd7, 0x7a, 0xbf, 0xcc, 0xdc, 0x5b, 0x09, 0xba, 0x70, 0xa4, 
0x00, + 0x85, 0xcf, 0x81, 0x01, 0x5a, 0xf5, 0x03, 0x2c, 0x9d, 0x38, 0x1b, 0x0b, 0x55, 0x18, 0xf1, 0xb8, + 0x59, 0x18, 0x8b, 0xd5, 0x71, 0x02, 0x12, 0xc2, 0xf0, 0x7b, 0x06, 0xc8, 0xcb, 0xe3, 0x82, 0x9a, + 0x43, 0x3c, 0x3c, 0x5f, 0xef, 0xbd, 0xdd, 0xb2, 0xec, 0x4d, 0xd6, 0x4c, 0x12, 0x28, 0x52, 0xe0, + 0xf0, 0x3b, 0x06, 0x00, 0x55, 0x75, 0x76, 0x99, 0xc3, 0xdc, 0x87, 0x3d, 0xdb, 0x2a, 0xda, 0xa9, + 0x28, 0x02, 0x21, 0x29, 0x95, 0x34, 0x54, 0x58, 0x01, 0x53, 0x01, 0xc1, 0x5c, 0xf7, 0x1d, 0x6f, + 0xcf, 0xf3, 0x0f, 0xbc, 0x1b, 0x0e, 0x76, 0x6d, 0x6a, 0x82, 0x39, 0x63, 0x3e, 0x5f, 0xbe, 0x24, + 0xed, 0x9f, 0xda, 0xea, 0x24, 0x84, 0x3a, 0x8f, 0x2d, 0xbe, 0xd7, 0x97, 0xad, 0xb5, 0xb2, 0xe7, + 0x05, 0xfc, 0x40, 0x4c, 0x5e, 0xe4, 0x61, 0x6a, 0x1a, 0x7c, 0x21, 0xde, 0xec, 0xfd, 0x42, 0xa8, + 0x5c, 0x9f, 0x1c, 0xd2, 0x8a, 0x44, 0x91, 0x66, 0x02, 0xfc, 0x89, 0x01, 0xc6, 0xac, 0x6a, 0x15, + 0x07, 0x21, 0xb6, 0xc5, 0x36, 0xce, 0x9d, 0x6e, 0x54, 0x4f, 0x49, 0x83, 0xc6, 0x96, 0x74, 0x54, + 0x94, 0x36, 0x02, 0xbe, 0x08, 0xce, 0xd2, 0xd0, 0x27, 0xd8, 0x8e, 0x23, 0x48, 0x66, 0x17, 0xd8, + 0x6a, 0x16, 0xce, 0x56, 0x52, 0x1c, 0x94, 0x91, 0x2c, 0xb6, 0x06, 0x41, 0xe1, 0x01, 0x11, 0xfa, + 0x10, 0x45, 0xef, 0x55, 0x30, 0xc8, 0x67, 0x6a, 0x73, 0x87, 0xe4, 0xb5, 0xa3, 0x9e, 0x53, 0x91, + 0xe4, 0xb2, 0xe3, 0x89, 0xe1, 0xb3, 0xe3, 0xa9, 0x8f, 0x0b, 0xaa, 0xe3, 0xa9, 0x22, 0xc8, 0x28, + 0xe6, 0xc3, 0x45, 0x00, 0x6c, 0x1c, 0x10, 0xcc, 0x32, 0x92, 0x6d, 0x0e, 0x71, 0x69, 0xb5, 0x3e, + 0x2b, 0x8a, 0x83, 0x34, 0x29, 0x78, 0x03, 0xc0, 0xf8, 0x9f, 0xe3, 0x7b, 0xaf, 0x59, 0xc4, 0x73, + 0xbc, 0xba, 0x99, 0xe7, 0x66, 0x4f, 0xb3, 0xd3, 0x76, 0xa5, 0x8d, 0x8b, 0x3a, 0x8c, 0x80, 0xfb, + 0x60, 0x50, 0x5c, 0xa3, 0x79, 0xde, 0xe8, 0xe1, 0x8e, 0xbb, 0x6b, 0xb9, 0x8e, 0xcd, 0xa1, 0xca, + 0x80, 0xbb, 0x87, 0xa3, 0x20, 0x89, 0x06, 0xdf, 0x37, 0xc0, 0x28, 0x8d, 0x76, 0x88, 0x94, 0xa6, + 0x3c, 0xab, 0x8f, 0x2c, 0xde, 0xee, 0x15, 0x7c, 0x45, 0xd3, 0x5d, 0x9e, 0x68, 0x35, 0x0b, 0xa3, + 0x3a, 0x05, 0xa5, 0xb0, 0xe1, 0xef, 0x0c, 0x60, 0x5a, 0xb6, 0x08, 0x7d, 0xcb, 0xdd, 0x22, 0x8e, + 0x17, 0x62, 0x22, 0x2e, 0x44, 0xe2, 0xf8, 0xe8, 0x61, 0xad, 0x98, 0xbd, 0x67, 0x95, 0xe7, 0xe4, + 0x4a, 0x9b, 0x4b, 0x5d, 0x2c, 0x40, 0x5d, 0x6d, 0x63, 0x79, 0x63, 0x82, 0x62, 0x17, 0x57, 0x43, + 0x6b, 0xc7, 0xc5, 0x32, 0x57, 0x0d, 0x73, 0x83, 0xd7, 0x1e, 0xdd, 0xe0, 0x4a, 0x5a, 0x63, 0x72, + 0x5f, 0xcf, 0x30, 0x28, 0x6a, 0x03, 0x2f, 0xfe, 0xcb, 0xc8, 0x26, 0x3b, 0xcd, 0xef, 0x95, 0xaa, + 0xe5, 0x62, 0xb8, 0x02, 0x26, 0x58, 0x3d, 0x8e, 0x70, 0xe0, 0x3a, 0x55, 0x8b, 0xf2, 0xfb, 0x98, + 0xd8, 0x6f, 0x09, 0x50, 0x86, 0x8f, 0xda, 0x46, 0xc0, 0x57, 0x01, 0x14, 0x85, 0x6a, 0x4a, 0x8f, + 0xa8, 0x4d, 0x54, 0xc9, 0x59, 0x69, 0x93, 0x40, 0x1d, 0x46, 0xc1, 0x65, 0x30, 0xe9, 0x5a, 0x3b, + 0xd8, 0x15, 0xf3, 0xf3, 0x09, 0x57, 0x25, 0x6e, 0xac, 0x53, 0xad, 0x66, 0x61, 0x72, 0x3d, 0xcb, + 0x44, 0xed, 0xf2, 0xc5, 0xcb, 0xd9, 0xec, 0xa2, 0x4f, 0x5c, 0x94, 0xff, 0x1f, 0xe6, 0xc0, 0x4c, + 0xf7, 0x30, 0x85, 0xdf, 0x56, 0xc5, 0xba, 0xa8, 0x41, 0x5f, 0x3f, 0x85, 0xcd, 0x20, 0x2f, 0x28, + 0xa0, 0xfd, 0x72, 0x02, 0x8f, 0x58, 0x05, 0x61, 0xb9, 0x71, 0x23, 0x62, 0xfb, 0x34, 0xd0, 0x99, + 0xfe, 0xf2, 0xb0, 0xa8, 0x4b, 0x2c, 0x97, 0x97, 0x21, 0x96, 0x8b, 0x8b, 0x1f, 0xb5, 0x5d, 0xb6, + 0x93, 0xf4, 0x01, 0xbf, 0x6f, 0x80, 0x71, 0x3f, 0xc0, 0xde, 0xd2, 0xd6, 0xda, 0xdd, 0xff, 0x17, + 0x69, 0x44, 0x3a, 0xe8, 0x04, 0x31, 0xce, 0x6e, 0xfc, 0x42, 0xd7, 0x16, 0xf1, 0x03, 0x5a, 0x3e, + 0xd7, 0x6a, 0x16, 0xc6, 0x37, 0xd3, 0x28, 0x28, 0x0b, 0x5b, 0x6c, 0x80, 0xa9, 0xd5, 0xc3, 0x10, + 0x13, 0xcf, 0x72, 
0x57, 0xfc, 0x6a, 0xd4, 0xc0, 0x5e, 0x28, 0x6c, 0xcc, 0x34, 0x30, 0x8c, 0x87, + 0x6c, 0x60, 0x5c, 0x02, 0x7d, 0x11, 0x71, 0x65, 0xd4, 0x8e, 0xa8, 0xb6, 0x1c, 0x5a, 0x47, 0x8c, + 0x5e, 0xbc, 0x0c, 0xfa, 0x99, 0x9d, 0xf0, 0x02, 0xe8, 0x23, 0xd6, 0x01, 0xd7, 0x3a, 0x5a, 0x1e, + 0x62, 0x22, 0xc8, 0x3a, 0x40, 0x8c, 0x56, 0xfc, 0xdb, 0x1c, 0x18, 0xcf, 0xcc, 0x05, 0xce, 0x80, + 0x9c, 0xea, 0xf5, 0x01, 0xa9, 0x34, 0xb7, 0xb6, 0x82, 0x72, 0x8e, 0x0d, 0x5f, 0x50, 0xf9, 0x5e, + 0x80, 0x16, 0xd4, 0xf1, 0xc5, 0xa9, 0xac, 0x50, 0x4c, 0xd4, 0x31, 0x43, 0xe2, 0x84, 0xcd, 0x6c, + 0xc0, 0x35, 0xb9, 0x2b, 0x84, 0x0d, 0xb8, 0x86, 0x18, 0xed, 0x51, 0xbb, 0x37, 0x71, 0xfb, 0x68, + 0xe0, 0x21, 0xda, 0x47, 0x83, 0xf7, 0x6d, 0x1f, 0x5d, 0x01, 0x03, 0xa1, 0x13, 0xba, 0x98, 0x9f, + 0x9d, 0x5a, 0x79, 0x7e, 0x9b, 0x11, 0x91, 0xe0, 0x41, 0x0c, 0x86, 0x6c, 0x5c, 0xb3, 0x22, 0x37, + 0xe4, 0xc7, 0xe4, 0xc8, 0xe2, 0xd7, 0x4f, 0x16, 0x3d, 0xa2, 0xbd, 0xb2, 0x22, 0x54, 0xa2, 0x58, + 0x37, 0x7c, 0x1c, 0x0c, 0x35, 0xac, 0x43, 0xa7, 0x11, 0x35, 0x78, 0x0d, 0x6b, 0x08, 0xb1, 0x0d, + 0x41, 0x42, 0x31, 0x8f, 0x25, 0x41, 0x7c, 0x58, 0x75, 0x23, 0xea, 0xec, 0x63, 0xc9, 0x94, 0x45, + 0xa6, 0x4a, 0x82, 0xab, 0x19, 0x3e, 0x6a, 0x1b, 0xc1, 0xc1, 0x1c, 0x8f, 0x0f, 0x1e, 0xd1, 0xc0, + 0x04, 0x09, 0xc5, 0xbc, 0x34, 0x98, 0x94, 0x1f, 0xed, 0x06, 0x26, 0x07, 0xb7, 0x8d, 0x80, 0x4f, + 0x81, 0xe1, 0x86, 0x75, 0xb8, 0x8e, 0xbd, 0x7a, 0xb8, 0x6b, 0x8e, 0xcd, 0x19, 0xf3, 0x7d, 0xe5, + 0xb1, 0x56, 0xb3, 0x30, 0xbc, 0x11, 0x13, 0x51, 0xc2, 0xe7, 0xc2, 0x8e, 0x27, 0x85, 0xcf, 0x6a, + 0xc2, 0x31, 0x11, 0x25, 0x7c, 0x56, 0x2b, 0x05, 0x56, 0xc8, 0xf6, 0x95, 0x39, 0x9e, 0xbe, 0xca, + 0x6f, 0x09, 0x32, 0x8a, 0xf9, 0x70, 0x1e, 0xe4, 0x1b, 0xd6, 0x21, 0xbf, 0xe5, 0x9a, 0x13, 0x5c, + 0x2d, 0x6f, 0x71, 0x6e, 0x48, 0x1a, 0x52, 0x5c, 0x2e, 0xe9, 0x78, 0x42, 0x72, 0x52, 0x93, 0x94, + 0x34, 0xa4, 0xb8, 0x2c, 0x7e, 0x23, 0xcf, 0xb9, 0x17, 0x61, 0x21, 0x0c, 0xb9, 0x67, 0x54, 0xfc, + 0xde, 0x49, 0x58, 0x48, 0x97, 0x63, 0xb7, 0xcc, 0x46, 0xe4, 0x86, 0x4e, 0xe0, 0xe2, 0xcd, 0x9a, + 0x79, 0x8e, 0xfb, 0x9f, 0x5f, 0x2e, 0x36, 0x14, 0x15, 0x69, 0x12, 0xf0, 0x6d, 0xd0, 0x8f, 0xbd, + 0xa8, 0x61, 0x9e, 0xe7, 0xe7, 0xf3, 0x49, 0xa3, 0x4f, 0xed, 0x97, 0x55, 0x2f, 0x6a, 0x20, 0xae, + 0x19, 0xbe, 0x00, 0xc6, 0x1a, 0xd6, 0x21, 0x4b, 0x02, 0x98, 0x84, 0xec, 0xea, 0x3b, 0xc5, 0xe7, + 0x3d, 0xc9, 0xca, 0xea, 0x0d, 0x9d, 0x81, 0xd2, 0x72, 0x7c, 0xa0, 0xe3, 0x69, 0x03, 0xa7, 0xb5, + 0x81, 0x3a, 0x03, 0xa5, 0xe5, 0x98, 0x93, 0x09, 0xbe, 0x17, 0x39, 0x04, 0xdb, 0xe6, 0xff, 0xf0, + 0x4a, 0x5c, 0x76, 0x9c, 0x05, 0x0d, 0x29, 0x2e, 0xbc, 0x17, 0x37, 0x41, 0x4c, 0xbe, 0xf9, 0xb6, + 0x7a, 0x96, 0xba, 0x37, 0xc9, 0x12, 0x21, 0xd6, 0x91, 0x38, 0x55, 0xf4, 0xf6, 0x07, 0xf4, 0xc0, + 0x80, 0xe5, 0xba, 0x9b, 0x35, 0xf3, 0xc2, 0x49, 0x2b, 0xa2, 0xec, 0x69, 0xa1, 0x32, 0xcc, 0x12, + 0xd3, 0x8f, 0x04, 0x0c, 0xc3, 0xf3, 0x3d, 0x16, 0x0b, 0x33, 0xa7, 0x86, 0xb7, 0xc9, 0xf4, 0x23, + 0x01, 0xc3, 0xe7, 0xe7, 0x1d, 0x6d, 0xd6, 0xcc, 0xc7, 0x4e, 0x6f, 0x7e, 0x4c, 0x3f, 0x12, 0x30, + 0xd0, 0x06, 0x7d, 0x9e, 0x1f, 0x9a, 0x17, 0x7b, 0x7d, 0xf6, 0xf2, 0xd3, 0xe4, 0x96, 0x1f, 0x22, + 0xa6, 0x1e, 0xfe, 0xd0, 0x00, 0x20, 0x48, 0x22, 0xf1, 0xd2, 0x49, 0x9b, 0x12, 0x19, 0xb4, 0x52, + 0x12, 0xbd, 0xab, 0x5e, 0x48, 0x8e, 0x92, 0x9b, 0x96, 0x16, 0xe5, 0x9a, 0x01, 0xf0, 0x67, 0x06, + 0x38, 0xaf, 0x17, 0xe0, 0xca, 0xb2, 0x59, 0xee, 0x87, 0xcd, 0x1e, 0x06, 0x72, 0xd9, 0xf7, 0xdd, + 0xb2, 0xd9, 0x6a, 0x16, 0xce, 0x2f, 0x75, 0x00, 0x44, 0x1d, 0xcd, 0x80, 0xbf, 0x32, 0xc0, 0xa4, + 0xcc, 0x8e, 0x9a, 0x71, 0x05, 0xee, 0xb6, 
0xb7, 0x7b, 0xe8, 0xb6, 0x2c, 0x84, 0xf0, 0x9e, 0xfa, + 0xee, 0xd9, 0xc6, 0x47, 0xed, 0x56, 0xc1, 0xdf, 0x1a, 0x60, 0xd4, 0xc6, 0x01, 0xf6, 0x6c, 0xec, + 0x55, 0x99, 0x99, 0x73, 0x27, 0xed, 0x74, 0x64, 0xcd, 0x5c, 0xd1, 0xb4, 0x0b, 0x0b, 0x4b, 0xd2, + 0xc2, 0x51, 0x9d, 0x75, 0xdc, 0x2c, 0x4c, 0x27, 0x43, 0x75, 0x0e, 0x4a, 0x19, 0x08, 0x7f, 0x64, + 0x80, 0xf1, 0xc4, 0xed, 0xe2, 0x80, 0xb8, 0x7c, 0x3a, 0x0b, 0xcf, 0x4b, 0xd0, 0xa5, 0x34, 0x16, + 0xca, 0x82, 0xc3, 0x5f, 0x1b, 0xac, 0xda, 0x8a, 0x6f, 0x8f, 0xd4, 0x2c, 0x72, 0x0f, 0xbe, 0xd1, + 0x4b, 0x0f, 0x2a, 0xe5, 0xc2, 0x81, 0xd7, 0x92, 0x4a, 0x4e, 0x71, 0x8e, 0x9b, 0x85, 0x29, 0xdd, + 0x7f, 0x8a, 0x81, 0x74, 0xe3, 0xe0, 0x7b, 0x06, 0x18, 0xc5, 0x49, 0xc1, 0x4c, 0xcd, 0x2b, 0x27, + 0x75, 0x5d, 0xc7, 0xf2, 0x5b, 0x5c, 0xf0, 0x35, 0x16, 0x45, 0x29, 0x58, 0x56, 0xfb, 0xe1, 0x43, + 0xab, 0x11, 0xb8, 0xd8, 0xfc, 0xdf, 0xde, 0xd5, 0x7e, 0xab, 0x42, 0x25, 0x8a, 0x75, 0xc3, 0x6b, + 0x20, 0xef, 0x45, 0xae, 0xcb, 0xae, 0xc3, 0xe6, 0xe3, 0xbc, 0x8a, 0x50, 0x1d, 0xcf, 0x5b, 0x92, + 0x8e, 0x94, 0x04, 0xac, 0x81, 0xb9, 0xc3, 0x9b, 0xd1, 0x0e, 0x26, 0x1e, 0x0e, 0x31, 0xed, 0xd8, + 0x52, 0x34, 0xaf, 0x72, 0x2d, 0x33, 0xad, 0x66, 0x61, 0x7a, 0xbb, 0x73, 0xd3, 0xf1, 0x81, 0x3a, + 0xe0, 0x9b, 0xe0, 0x31, 0x4d, 0x66, 0xb5, 0xb1, 0x83, 0x6d, 0x1b, 0xdb, 0xf1, 0x45, 0xcb, 0xfc, + 0x3f, 0x0e, 0xa1, 0xf6, 0xf1, 0x76, 0x56, 0x00, 0xdd, 0x6f, 0x34, 0x5c, 0x07, 0xd3, 0x1a, 0x7b, + 0xcd, 0x0b, 0x37, 0x49, 0x25, 0x24, 0x8e, 0x57, 0x37, 0xe7, 0xb9, 0xde, 0xf3, 0xf1, 0xee, 0xdb, + 0xd6, 0x78, 0xa8, 0xcb, 0x18, 0xf8, 0x4a, 0x4a, 0x1b, 0xff, 0x94, 0x62, 0x05, 0x37, 0xf1, 0x11, + 0x35, 0x9f, 0xe0, 0xc5, 0x05, 0x5f, 0xe7, 0x6d, 0x8d, 0x8e, 0xba, 0xc8, 0xc3, 0x97, 0xc0, 0xb9, + 0x0c, 0x87, 0xdd, 0x2b, 0xcc, 0x27, 0xc5, 0x05, 0x81, 0x55, 0xa2, 0xdb, 0x31, 0x11, 0x75, 0x92, + 0x84, 0x5f, 0x05, 0x50, 0x23, 0x6f, 0x58, 0x01, 0x1f, 0xff, 0x94, 0xb8, 0xab, 0xb0, 0x15, 0xdd, + 0x96, 0x34, 0xd4, 0x41, 0x0e, 0x7e, 0x68, 0xa4, 0x66, 0x92, 0xdc, 0x66, 0xa9, 0x79, 0x8d, 0x6f, + 0xd8, 0x57, 0x1e, 0x3d, 0x00, 0x13, 0x65, 0x28, 0x72, 0xb1, 0xe6, 0x61, 0x0d, 0x05, 0x75, 0x41, + 0x9f, 0x61, 0x97, 0xe9, 0x4c, 0x0e, 0x87, 0x13, 0xa0, 0x6f, 0x0f, 0xcb, 0x0f, 0xd9, 0x88, 0xfd, + 0x84, 0x6f, 0x81, 0x81, 0x7d, 0xcb, 0x8d, 0xe2, 0x56, 0x40, 0xef, 0xce, 0x7a, 0x24, 0xf4, 0xbe, + 0x98, 0xbb, 0x6e, 0xcc, 0x7c, 0x60, 0x80, 0xe9, 0xce, 0xa7, 0xca, 0x17, 0x65, 0xd1, 0x4f, 0x0d, + 0x30, 0xd9, 0x76, 0x80, 0x74, 0x30, 0xc6, 0x4d, 0x1b, 0x73, 0xb7, 0x87, 0x27, 0x81, 0xd8, 0x08, + 0xbc, 0xa2, 0xd5, 0x2d, 0xfb, 0x81, 0x01, 0x26, 0xb2, 0x89, 0xf9, 0x0b, 0xf2, 0x52, 0xf1, 0xfd, + 0x1c, 0x98, 0xee, 0x5c, 0x83, 0xc3, 0x86, 0xea, 0x2e, 0xf4, 0xbc, 0x41, 0xd3, 0xa9, 0x89, 0xfc, + 0xae, 0x01, 0x46, 0xde, 0x51, 0x72, 0xf1, 0xf7, 0xd5, 0x5e, 0x76, 0x85, 0xe2, 0xa3, 0x2f, 0x61, + 0x50, 0xa4, 0x43, 0x16, 0x7f, 0x63, 0x80, 0xa9, 0x8e, 0xc7, 0x39, 0xbc, 0x0a, 0x06, 0x2d, 0xd7, + 0xf5, 0x0f, 0x44, 0x37, 0x4f, 0xfb, 0x50, 0xb0, 0xc4, 0xa9, 0x48, 0x72, 0x35, 0x9f, 0xe5, 0x3e, + 0x07, 0x9f, 0x15, 0x7f, 0x6f, 0x80, 0x8b, 0xf7, 0x8b, 0xba, 0xcf, 0x7b, 0x0d, 0xe7, 0x41, 0x5e, + 0x16, 0xdb, 0x47, 0x7c, 0xfd, 0x64, 0x76, 0x95, 0x19, 0x81, 0xbf, 0xdf, 0x11, 0xbf, 0x8a, 0x2f, + 0x81, 0xf1, 0x4c, 0x03, 0x3a, 0xf5, 0xa4, 0xc7, 0x78, 0xe0, 0x93, 0x9e, 0x5f, 0x18, 0x60, 0xa2, + 0x82, 0xc9, 0xbe, 0x53, 0xc5, 0x08, 0xd7, 0x30, 0xc1, 0x5e, 0x15, 0xc3, 0x05, 0x30, 0xcc, 0x3f, + 0xa0, 0x06, 0x56, 0x35, 0xfe, 0xec, 0x33, 0x29, 0x75, 0x0c, 0xdf, 0x8a, 0x19, 0x28, 0x91, 0x51, + 0x9f, 0x88, 0x72, 0x5d, 0x3f, 0x11, 0x5d, 0x04, 0xfd, 0x41, 0xd2, 
0x41, 0xce, 0x33, 0x2e, 0xb7, + 0x84, 0x53, 0x39, 0xd7, 0x27, 0x21, 0x6f, 0x93, 0x0d, 0x48, 0xae, 0x4f, 0x42, 0xc4, 0xa9, 0xc5, + 0x3f, 0xe6, 0xc0, 0xd9, 0x74, 0x82, 0x67, 0x80, 0x24, 0x72, 0xdb, 0xbe, 0x49, 0x31, 0x1e, 0xe2, + 0x1c, 0xfd, 0x29, 0x44, 0xee, 0xfe, 0x4f, 0x21, 0xe0, 0xcb, 0x60, 0x52, 0xfe, 0x5c, 0x3d, 0x0c, + 0x08, 0xa6, 0xfc, 0x63, 0x6b, 0x5f, 0xfa, 0x09, 0xe3, 0x46, 0x56, 0x00, 0xb5, 0x8f, 0x81, 0x5f, + 0xc9, 0x3c, 0xd3, 0xb8, 0x92, 0x3c, 0xd1, 0x60, 0xc5, 0x21, 0x5f, 0x9f, 0xbb, 0x2c, 0x67, 0xac, + 0x12, 0xe2, 0x93, 0xcc, 0xdb, 0x8d, 0x05, 0x30, 0x5c, 0x63, 0x02, 0x7c, 0xe1, 0x06, 0xd2, 0x4e, + 0xbf, 0x11, 0x33, 0x50, 0x22, 0x03, 0xbf, 0x06, 0xc6, 0xfd, 0x40, 0x94, 0xc1, 0x9b, 0xae, 0x5d, + 0xc1, 0x6e, 0x8d, 0xb7, 0x04, 0xf3, 0x71, 0xdf, 0x36, 0xc5, 0x42, 0x59, 0xd9, 0xe2, 0x1f, 0x0c, + 0x70, 0x2e, 0x7e, 0x1f, 0xe5, 0x3a, 0xd8, 0x0b, 0x97, 0x7d, 0xaf, 0xe6, 0xd4, 0xe1, 0x05, 0xd1, + 0x7f, 0xd5, 0x9a, 0x9a, 0x71, 0xef, 0x15, 0xde, 0x03, 0x43, 0x54, 0xc4, 0x8a, 0xdc, 0x07, 0xaf, + 0x9e, 0xe4, 0x83, 0x4a, 0x3a, 0xe8, 0x44, 0xf9, 0x18, 0x53, 0x63, 0x1c, 0xb6, 0x15, 0xaa, 0x56, + 0x39, 0xf2, 0x6c, 0xd9, 0x83, 0x1f, 0x15, 0x5b, 0x61, 0x79, 0x49, 0xd0, 0x90, 0xe2, 0x16, 0xff, + 0x6e, 0x80, 0xc9, 0xb6, 0xf7, 0x5e, 0xf0, 0xbb, 0x06, 0x18, 0xad, 0x6a, 0xd3, 0x93, 0x09, 0x65, + 0xe3, 0xe4, 0x6f, 0xca, 0x34, 0xa5, 0xa2, 0x06, 0xd3, 0x29, 0x28, 0x05, 0x0a, 0xb7, 0x81, 0x59, + 0xcd, 0x3c, 0xad, 0xcc, 0x7c, 0xac, 0xbd, 0xd8, 0x6a, 0x16, 0xcc, 0xe5, 0x2e, 0x32, 0xa8, 0xeb, + 0xe8, 0xf2, 0x37, 0x3f, 0xfe, 0x6c, 0xf6, 0xcc, 0x27, 0x9f, 0xcd, 0x9e, 0xf9, 0xf4, 0xb3, 0xd9, + 0x33, 0xef, 0xb6, 0x66, 0x8d, 0x8f, 0x5b, 0xb3, 0xc6, 0x27, 0xad, 0x59, 0xe3, 0xd3, 0xd6, 0xac, + 0xf1, 0x97, 0xd6, 0xac, 0xf1, 0xe3, 0xbf, 0xce, 0x9e, 0x79, 0xe3, 0xfa, 0xa3, 0x3e, 0xa8, 0xfe, + 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa3, 0x1c, 0x7a, 0x10, 0x8b, 0x2d, 0x00, 0x00, } func (m *ConversionRequest) Marshal() (dAtA []byte, err error) { @@ -1618,6 +1648,20 @@ func (m *CustomResourceDefinitionVersion) MarshalToSizedBuffer(dAtA []byte) (int _ = i var l int _ = l + if len(m.SelectableFields) > 0 { + for iNdEx := len(m.SelectableFields) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.SelectableFields[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } if m.DeprecationWarning != nil { i -= len(*m.DeprecationWarning) copy(dAtA[i:], *m.DeprecationWarning) @@ -2570,6 +2614,34 @@ func (m *JSONSchemaPropsOrStringArray) MarshalToSizedBuffer(dAtA []byte) (int, e return len(dAtA) - i, nil } +func (m *SelectableField) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelectableField) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelectableField) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.JSONPath) + copy(dAtA[i:], m.JSONPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.JSONPath))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *ServiceReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -3027,6 +3099,12 @@ func (m *CustomResourceDefinitionVersion) Size() (n int) { l = len(*m.DeprecationWarning) n += 1 + l + sovGenerated(uint64(l)) } + if 
len(m.SelectableFields) > 0 { + for _, e := range m.SelectableFields { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -3341,6 +3419,17 @@ func (m *JSONSchemaPropsOrStringArray) Size() (n int) { return n } +func (m *SelectableField) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.JSONPath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *ServiceReference) Size() (n int) { if m == nil { return 0 @@ -3605,6 +3694,11 @@ func (this *CustomResourceDefinitionVersion) String() string { repeatedStringForAdditionalPrinterColumns += strings.Replace(strings.Replace(f.String(), "CustomResourceColumnDefinition", "CustomResourceColumnDefinition", 1), `&`, ``, 1) + "," } repeatedStringForAdditionalPrinterColumns += "}" + repeatedStringForSelectableFields := "[]SelectableField{" + for _, f := range this.SelectableFields { + repeatedStringForSelectableFields += strings.Replace(strings.Replace(f.String(), "SelectableField", "SelectableField", 1), `&`, ``, 1) + "," + } + repeatedStringForSelectableFields += "}" s := strings.Join([]string{`&CustomResourceDefinitionVersion{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Served:` + fmt.Sprintf("%v", this.Served) + `,`, @@ -3614,6 +3708,7 @@ func (this *CustomResourceDefinitionVersion) String() string { `AdditionalPrinterColumns:` + repeatedStringForAdditionalPrinterColumns + `,`, `Deprecated:` + fmt.Sprintf("%v", this.Deprecated) + `,`, `DeprecationWarning:` + valueToStringGenerated(this.DeprecationWarning) + `,`, + `SelectableFields:` + repeatedStringForSelectableFields + `,`, `}`, }, "") return s @@ -3837,6 +3932,16 @@ func (this *JSONSchemaPropsOrStringArray) String() string { }, "") return s } +func (this *SelectableField) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectableField{`, + `JSONPath:` + fmt.Sprintf("%v", this.JSONPath) + `,`, + `}`, + }, "") + return s +} func (this *ServiceReference) String() string { if this == nil { return "nil" @@ -6027,6 +6132,40 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.DeprecationWarning = &s iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SelectableFields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SelectableFields = append(m.SelectableFields, SelectableField{}) + if err := m.SelectableFields[len(m.SelectableFields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -8667,6 +8806,88 @@ func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { } return nil } +func (m *SelectableField) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } 
+ } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelectableField: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelectableField: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JSONPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JSONPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ServiceReference) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto index 3c39d63a5f2..2ad78822f8b 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto @@ -40,6 +40,7 @@ message ConversionRequest { optional string desiredAPIVersion = 2; // objects is the list of custom resource objects to be converted. + // +listType=atomic repeated k8s.io.apimachinery.pkg.runtime.RawExtension objects = 3; } @@ -53,6 +54,7 @@ message ConversionResponse { // The webhook is expected to set `apiVersion` of these objects to the `request.desiredAPIVersion`. The list // must also have the same size as the input list with the same objects in the same order (equal kind, metadata.uid, metadata.name and metadata.namespace). // The webhook is allowed to mutate labels and annotations. Any other change to the metadata is silently ignored. + // +listType=atomic repeated k8s.io.apimachinery.pkg.runtime.RawExtension convertedObjects = 2; // result contains the result of conversion with extra details if the conversion failed. `result.status` determines if @@ -182,6 +184,7 @@ message CustomResourceDefinitionNames { // and used by clients to support invocations like `kubectl get `. // It must be all lowercase. // +optional + // +listType=atomic repeated string shortNames = 3; // kind is the serialized kind of the resource. It is normally CamelCase and singular. @@ -196,6 +199,7 @@ message CustomResourceDefinitionNames { // This is published in API discovery documents, and used by clients to support invocations like // `kubectl get all`. // +optional + // +listType=atomic repeated string categories = 6; } @@ -221,6 +225,7 @@ message CustomResourceDefinitionSpec { // by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing // major version, then minor version. 
An example sorted list of versions: // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. + // +listType=atomic repeated CustomResourceDefinitionVersion versions = 7; // conversion defines conversion settings for the CRD. @@ -256,6 +261,7 @@ message CustomResourceDefinitionStatus { // versions from this list. // Versions may not be removed from `spec.versions` while they exist in this list. // +optional + // +listType=atomic repeated string storedVersions = 3; } @@ -297,7 +303,17 @@ message CustomResourceDefinitionVersion { // See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. // If no columns are specified, a single column displaying the age of the custom resource is used. // +optional + // +listType=atomic repeated CustomResourceColumnDefinition additionalPrinterColumns = 6; + + // selectableFields specifies paths to fields that may be used as field selectors. + // A maximum of 8 selectable fields are allowed. + // See https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors + // + // +featureGate=CustomResourceFieldSelectors + // +optional + // +listType=atomic + repeated SelectableField selectableFields = 9; } // CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources. @@ -439,20 +455,25 @@ message JSONSchemaProps { optional double multipleOf = 19; + // +listType=atomic repeated JSON enum = 20; optional int64 maxProperties = 21; optional int64 minProperties = 22; + // +listType=atomic repeated string required = 23; optional JSONSchemaPropsOrArray items = 24; + // +listType=atomic repeated JSONSchemaProps allOf = 25; + // +listType=atomic repeated JSONSchemaProps oneOf = 26; + // +listType=atomic repeated JSONSchemaProps anyOf = 27; optional JSONSchemaProps not = 28; @@ -518,6 +539,7 @@ message JSONSchemaProps { // to ensure those properties are present for all list items. // // +optional + // +listType=atomic repeated string xKubernetesListMapKeys = 41; // x-kubernetes-list-type annotates an array to further describe its topology. @@ -564,6 +586,7 @@ message JSONSchemaProps { message JSONSchemaPropsOrArray { optional JSONSchemaProps schema = 1; + // +listType=atomic repeated JSONSchemaProps jSONSchemas = 2; } @@ -579,9 +602,23 @@ message JSONSchemaPropsOrBool { message JSONSchemaPropsOrStringArray { optional JSONSchemaProps schema = 1; + // +listType=atomic repeated string property = 2; } +// SelectableField specifies the JSON path of a field that may be used with field selectors. +message SelectableField { + // jsonPath is a simple JSON path which is evaluated against each custom resource to produce a + // field selector value. + // Only JSON paths without the array notation are allowed. + // Must point to a field of type string, boolean or integer. Types with enum values + // and strings with formats are allowed. + // If jsonPath refers to absent field in a resource, the jsonPath evaluates to an empty string. + // Must not point to metdata fields. + // Required. + optional string jsonPath = 1; +} + // ServiceReference holds a reference to Service.legacy.k8s.io message ServiceReference { // namespace is the namespace of the service. @@ -787,6 +824,7 @@ message WebhookConversion { // are supported by API server, conversion will fail for the custom resource. // If a persisted Webhook configuration specifies allowed versions and does not // include any versions known to the API Server, calls to the webhook will fail. 
+ // +listType=atomic repeated string conversionReviewVersions = 3; } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go index 59ec0e372b8..e1d1e0be390 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go @@ -56,6 +56,7 @@ type CustomResourceDefinitionSpec struct { // by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing // major version, then minor version. An example sorted list of versions: // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. + // +listType=atomic Versions []CustomResourceDefinitionVersion `json:"versions" protobuf:"bytes,7,rep,name=versions"` // conversion defines conversion settings for the CRD. @@ -96,6 +97,7 @@ type WebhookConversion struct { // are supported by API server, conversion will fail for the custom resource. // If a persisted Webhook configuration specifies allowed versions and does not // include any versions known to the API Server, calls to the webhook will fail. + // +listType=atomic ConversionReviewVersions []string `json:"conversionReviewVersions" protobuf:"bytes,3,rep,name=conversionReviewVersions"` } @@ -195,7 +197,30 @@ type CustomResourceDefinitionVersion struct { // See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. // If no columns are specified, a single column displaying the age of the custom resource is used. // +optional + // +listType=atomic AdditionalPrinterColumns []CustomResourceColumnDefinition `json:"additionalPrinterColumns,omitempty" protobuf:"bytes,6,rep,name=additionalPrinterColumns"` + + // selectableFields specifies paths to fields that may be used as field selectors. + // A maximum of 8 selectable fields are allowed. + // See https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors + // + // +featureGate=CustomResourceFieldSelectors + // +optional + // +listType=atomic + SelectableFields []SelectableField `json:"selectableFields,omitempty" protobuf:"bytes,9,rep,name=selectableFields"` +} + +// SelectableField specifies the JSON path of a field that may be used with field selectors. +type SelectableField struct { + // jsonPath is a simple JSON path which is evaluated against each custom resource to produce a + // field selector value. + // Only JSON paths without the array notation are allowed. + // Must point to a field of type string, boolean or integer. Types with enum values + // and strings with formats are allowed. + // If jsonPath refers to absent field in a resource, the jsonPath evaluates to an empty string. + // Must not point to metdata fields. + // Required. + JSONPath string `json:"jsonPath" protobuf:"bytes,1,opt,name=jsonPath"` } // CustomResourceColumnDefinition specifies a column for server side printing. @@ -237,6 +262,7 @@ type CustomResourceDefinitionNames struct { // and used by clients to support invocations like `kubectl get `. // It must be all lowercase. // +optional + // +listType=atomic ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,3,opt,name=shortNames"` // kind is the serialized kind of the resource. It is normally CamelCase and singular. // Custom resource instances will use this value as the `kind` attribute in API calls. 
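For context, the selectableFields field and SelectableField type added in the types.go hunk above let a CRD version expose custom-resource fields to field selectors, behind the CustomResourceFieldSelectors feature gate. Below is a minimal Go sketch of how these vendored v1 types might be populated; the "widgets" resource and the ".spec.color" path are assumptions made up for illustration and are not taken from this patch.

package main

import (
	"fmt"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
)

func main() {
	// Hypothetical "v1" version of a CRD that marks .spec.color as selectable,
	// so clients could filter instances with a field selector such as
	//   kubectl get widgets --field-selector spec.color=blue
	// (requires the CustomResourceFieldSelectors feature gate; names are illustrative).
	version := apiextensionsv1.CustomResourceDefinitionVersion{
		Name:    "v1",
		Served:  true,
		Storage: true,
		SelectableFields: []apiextensionsv1.SelectableField{
			{JSONPath: ".spec.color"},
		},
	}
	fmt.Println(version.SelectableFields[0].JSONPath) // prints ".spec.color"
}

On the wire, the generated marshal code earlier in this file writes each SelectableField as protobuf field 9 of CustomResourceDefinitionVersion (tag byte 0x4a) and its jsonPath as field 1 (tag byte 0xa), matching the protobuf:"bytes,9,rep,name=selectableFields" and protobuf:"bytes,1,opt,name=jsonPath" struct tags in the hunks above.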
@@ -248,6 +274,7 @@ type CustomResourceDefinitionNames struct { // This is published in API discovery documents, and used by clients to support invocations like // `kubectl get all`. // +optional + // +listType=atomic Categories []string `json:"categories,omitempty" protobuf:"bytes,6,rep,name=categories"` } @@ -345,6 +372,7 @@ type CustomResourceDefinitionStatus struct { // versions from this list. // Versions may not be removed from `spec.versions` while they exist in this list. // +optional + // +listType=atomic StoredVersions []string `json:"storedVersions" protobuf:"bytes,3,rep,name=storedVersions"` } @@ -463,6 +491,7 @@ type ConversionRequest struct { // desiredAPIVersion is the version to convert given objects to. e.g. "myapi.example.com/v1" DesiredAPIVersion string `json:"desiredAPIVersion" protobuf:"bytes,2,name=desiredAPIVersion"` // objects is the list of custom resource objects to be converted. + // +listType=atomic Objects []runtime.RawExtension `json:"objects" protobuf:"bytes,3,rep,name=objects"` } @@ -475,6 +504,7 @@ type ConversionResponse struct { // The webhook is expected to set `apiVersion` of these objects to the `request.desiredAPIVersion`. The list // must also have the same size as the input list with the same objects in the same order (equal kind, metadata.uid, metadata.name and metadata.namespace). // The webhook is allowed to mutate labels and annotations. Any other change to the metadata is silently ignored. + // +listType=atomic ConvertedObjects []runtime.RawExtension `json:"convertedObjects" protobuf:"bytes,2,rep,name=convertedObjects"` // result contains the result of conversion with extra details if the conversion failed. `result.status` determines if // the conversion failed or succeeded. The `result.status` field is required and represents the success or failure of the diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go index a81451ad6ef..5dbdf576b3e 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go @@ -76,25 +76,30 @@ type JSONSchemaProps struct { // default is a default value for undefined object fields. // Defaulting is a beta feature under the CustomResourceDefaulting feature gate. // Defaulting requires spec.preserveUnknownFields to be false. 
- Default *JSON `json:"default,omitempty" protobuf:"bytes,8,opt,name=default"` - Maximum *float64 `json:"maximum,omitempty" protobuf:"bytes,9,opt,name=maximum"` - ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty" protobuf:"bytes,10,opt,name=exclusiveMaximum"` - Minimum *float64 `json:"minimum,omitempty" protobuf:"bytes,11,opt,name=minimum"` - ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty" protobuf:"bytes,12,opt,name=exclusiveMinimum"` - MaxLength *int64 `json:"maxLength,omitempty" protobuf:"bytes,13,opt,name=maxLength"` - MinLength *int64 `json:"minLength,omitempty" protobuf:"bytes,14,opt,name=minLength"` - Pattern string `json:"pattern,omitempty" protobuf:"bytes,15,opt,name=pattern"` - MaxItems *int64 `json:"maxItems,omitempty" protobuf:"bytes,16,opt,name=maxItems"` - MinItems *int64 `json:"minItems,omitempty" protobuf:"bytes,17,opt,name=minItems"` - UniqueItems bool `json:"uniqueItems,omitempty" protobuf:"bytes,18,opt,name=uniqueItems"` - MultipleOf *float64 `json:"multipleOf,omitempty" protobuf:"bytes,19,opt,name=multipleOf"` - Enum []JSON `json:"enum,omitempty" protobuf:"bytes,20,rep,name=enum"` - MaxProperties *int64 `json:"maxProperties,omitempty" protobuf:"bytes,21,opt,name=maxProperties"` - MinProperties *int64 `json:"minProperties,omitempty" protobuf:"bytes,22,opt,name=minProperties"` - Required []string `json:"required,omitempty" protobuf:"bytes,23,rep,name=required"` - Items *JSONSchemaPropsOrArray `json:"items,omitempty" protobuf:"bytes,24,opt,name=items"` - AllOf []JSONSchemaProps `json:"allOf,omitempty" protobuf:"bytes,25,rep,name=allOf"` - OneOf []JSONSchemaProps `json:"oneOf,omitempty" protobuf:"bytes,26,rep,name=oneOf"` + Default *JSON `json:"default,omitempty" protobuf:"bytes,8,opt,name=default"` + Maximum *float64 `json:"maximum,omitempty" protobuf:"bytes,9,opt,name=maximum"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty" protobuf:"bytes,10,opt,name=exclusiveMaximum"` + Minimum *float64 `json:"minimum,omitempty" protobuf:"bytes,11,opt,name=minimum"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty" protobuf:"bytes,12,opt,name=exclusiveMinimum"` + MaxLength *int64 `json:"maxLength,omitempty" protobuf:"bytes,13,opt,name=maxLength"` + MinLength *int64 `json:"minLength,omitempty" protobuf:"bytes,14,opt,name=minLength"` + Pattern string `json:"pattern,omitempty" protobuf:"bytes,15,opt,name=pattern"` + MaxItems *int64 `json:"maxItems,omitempty" protobuf:"bytes,16,opt,name=maxItems"` + MinItems *int64 `json:"minItems,omitempty" protobuf:"bytes,17,opt,name=minItems"` + UniqueItems bool `json:"uniqueItems,omitempty" protobuf:"bytes,18,opt,name=uniqueItems"` + MultipleOf *float64 `json:"multipleOf,omitempty" protobuf:"bytes,19,opt,name=multipleOf"` + // +listType=atomic + Enum []JSON `json:"enum,omitempty" protobuf:"bytes,20,rep,name=enum"` + MaxProperties *int64 `json:"maxProperties,omitempty" protobuf:"bytes,21,opt,name=maxProperties"` + MinProperties *int64 `json:"minProperties,omitempty" protobuf:"bytes,22,opt,name=minProperties"` + // +listType=atomic + Required []string `json:"required,omitempty" protobuf:"bytes,23,rep,name=required"` + Items *JSONSchemaPropsOrArray `json:"items,omitempty" protobuf:"bytes,24,opt,name=items"` + // +listType=atomic + AllOf []JSONSchemaProps `json:"allOf,omitempty" protobuf:"bytes,25,rep,name=allOf"` + // +listType=atomic + OneOf []JSONSchemaProps `json:"oneOf,omitempty" protobuf:"bytes,26,rep,name=oneOf"` + // +listType=atomic AnyOf []JSONSchemaProps `json:"anyOf,omitempty" 
protobuf:"bytes,27,rep,name=anyOf"` Not *JSONSchemaProps `json:"not,omitempty" protobuf:"bytes,28,opt,name=not"` Properties map[string]JSONSchemaProps `json:"properties,omitempty" protobuf:"bytes,29,rep,name=properties"` @@ -150,6 +155,7 @@ type JSONSchemaProps struct { // to ensure those properties are present for all list items. // // +optional + // +listType=atomic XListMapKeys []string `json:"x-kubernetes-list-map-keys,omitempty" protobuf:"bytes,41,rep,name=xKubernetesListMapKeys"` // x-kubernetes-list-type annotates an array to further describe its topology. @@ -343,7 +349,8 @@ type JSONSchemaURL string // JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps // or an array of JSONSchemaProps. Mainly here for serialization purposes. type JSONSchemaPropsOrArray struct { - Schema *JSONSchemaProps `protobuf:"bytes,1,opt,name=schema"` + Schema *JSONSchemaProps `protobuf:"bytes,1,opt,name=schema"` + // +listType=atomic JSONSchemas []JSONSchemaProps `protobuf:"bytes,2,rep,name=jSONSchemas"` } @@ -385,8 +392,9 @@ type JSONSchemaDependencies map[string]JSONSchemaPropsOrStringArray // JSONSchemaPropsOrStringArray represents a JSONSchemaProps or a string array. type JSONSchemaPropsOrStringArray struct { - Schema *JSONSchemaProps `protobuf:"bytes,1,opt,name=schema"` - Property []string `protobuf:"bytes,2,rep,name=property"` + Schema *JSONSchemaProps `protobuf:"bytes,1,opt,name=schema"` + // +listType=atomic + Property []string `protobuf:"bytes,2,rep,name=property"` } // OpenAPISchemaType is used by the kube-openapi generator when constructing diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go index 405021bf389..bb1d7e01429 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go @@ -192,6 +192,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*SelectableField)(nil), (*apiextensions.SelectableField)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_SelectableField_To_apiextensions_SelectableField(a.(*SelectableField), b.(*apiextensions.SelectableField), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.SelectableField)(nil), (*SelectableField)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_SelectableField_To_v1_SelectableField(a.(*apiextensions.SelectableField), b.(*SelectableField), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*ServiceReference)(nil), (*apiextensions.ServiceReference)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_ServiceReference_To_apiextensions_ServiceReference(a.(*ServiceReference), b.(*apiextensions.ServiceReference), scope) }); err != nil { @@ -493,6 +503,7 @@ func autoConvert_apiextensions_CustomResourceDefinitionSpec_To_v1_CustomResource out.Versions = nil } // WARNING: in.AdditionalPrinterColumns requires manual conversion: does not exist in peer-type + // WARNING: in.SelectableFields requires manual conversion: does not exist in peer-type if in.Conversion != nil { in, out := &in.Conversion, &out.Conversion *out = new(CustomResourceConversion) @@ -553,6 +564,7 @@ func 
autoConvert_v1_CustomResourceDefinitionVersion_To_apiextensions_CustomResou } out.Subresources = (*apiextensions.CustomResourceSubresources)(unsafe.Pointer(in.Subresources)) out.AdditionalPrinterColumns = *(*[]apiextensions.CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns)) + out.SelectableFields = *(*[]apiextensions.SelectableField)(unsafe.Pointer(&in.SelectableFields)) return nil } @@ -578,6 +590,7 @@ func autoConvert_apiextensions_CustomResourceDefinitionVersion_To_v1_CustomResou } out.Subresources = (*CustomResourceSubresources)(unsafe.Pointer(in.Subresources)) out.AdditionalPrinterColumns = *(*[]CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns)) + out.SelectableFields = *(*[]SelectableField)(unsafe.Pointer(&in.SelectableFields)) return nil } @@ -1225,6 +1238,26 @@ func Convert_apiextensions_JSONSchemaPropsOrStringArray_To_v1_JSONSchemaPropsOrS return autoConvert_apiextensions_JSONSchemaPropsOrStringArray_To_v1_JSONSchemaPropsOrStringArray(in, out, s) } +func autoConvert_v1_SelectableField_To_apiextensions_SelectableField(in *SelectableField, out *apiextensions.SelectableField, s conversion.Scope) error { + out.JSONPath = in.JSONPath + return nil +} + +// Convert_v1_SelectableField_To_apiextensions_SelectableField is an autogenerated conversion function. +func Convert_v1_SelectableField_To_apiextensions_SelectableField(in *SelectableField, out *apiextensions.SelectableField, s conversion.Scope) error { + return autoConvert_v1_SelectableField_To_apiextensions_SelectableField(in, out, s) +} + +func autoConvert_apiextensions_SelectableField_To_v1_SelectableField(in *apiextensions.SelectableField, out *SelectableField, s conversion.Scope) error { + out.JSONPath = in.JSONPath + return nil +} + +// Convert_apiextensions_SelectableField_To_v1_SelectableField is an autogenerated conversion function. +func Convert_apiextensions_SelectableField_To_v1_SelectableField(in *apiextensions.SelectableField, out *SelectableField, s conversion.Scope) error { + return autoConvert_apiextensions_SelectableField_To_v1_SelectableField(in, out, s) +} + func autoConvert_v1_ServiceReference_To_apiextensions_ServiceReference(in *ServiceReference, out *apiextensions.ServiceReference, s conversion.Scope) error { out.Namespace = in.Namespace out.Name = in.Name diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.deepcopy.go index bc23fcd86ff..f85a0b0677d 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.deepcopy.go @@ -329,6 +329,11 @@ func (in *CustomResourceDefinitionVersion) DeepCopyInto(out *CustomResourceDefin *out = make([]CustomResourceColumnDefinition, len(*in)) copy(*out, *in) } + if in.SelectableFields != nil { + in, out := &in.SelectableFields, &out.SelectableFields + *out = make([]SelectableField, len(*in)) + copy(*out, *in) + } return } @@ -585,6 +590,22 @@ func (in *JSONSchemaPropsOrStringArray) DeepCopy() *JSONSchemaPropsOrStringArray return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelectableField) DeepCopyInto(out *SelectableField) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelectableField. 
+func (in *SelectableField) DeepCopy() *SelectableField { + if in == nil { + return nil + } + out := new(SelectableField) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { *out = *in diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go index c81fa6bc31b..32e58324071 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto +// source: k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto package v1beta1 @@ -51,7 +51,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *ConversionRequest) Reset() { *m = ConversionRequest{} } func (*ConversionRequest) ProtoMessage() {} func (*ConversionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{0} + return fileDescriptor_3623d6c0bd238430, []int{0} } func (m *ConversionRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -79,7 +79,7 @@ var xxx_messageInfo_ConversionRequest proto.InternalMessageInfo func (m *ConversionResponse) Reset() { *m = ConversionResponse{} } func (*ConversionResponse) ProtoMessage() {} func (*ConversionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{1} + return fileDescriptor_3623d6c0bd238430, []int{1} } func (m *ConversionResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -107,7 +107,7 @@ var xxx_messageInfo_ConversionResponse proto.InternalMessageInfo func (m *ConversionReview) Reset() { *m = ConversionReview{} } func (*ConversionReview) ProtoMessage() {} func (*ConversionReview) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{2} + return fileDescriptor_3623d6c0bd238430, []int{2} } func (m *ConversionReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -135,7 +135,7 @@ var xxx_messageInfo_ConversionReview proto.InternalMessageInfo func (m *CustomResourceColumnDefinition) Reset() { *m = CustomResourceColumnDefinition{} } func (*CustomResourceColumnDefinition) ProtoMessage() {} func (*CustomResourceColumnDefinition) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{3} + return fileDescriptor_3623d6c0bd238430, []int{3} } func (m *CustomResourceColumnDefinition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -163,7 +163,7 @@ var xxx_messageInfo_CustomResourceColumnDefinition proto.InternalMessageInfo func (m *CustomResourceConversion) Reset() { *m = CustomResourceConversion{} } func (*CustomResourceConversion) ProtoMessage() {} func (*CustomResourceConversion) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{4} + return fileDescriptor_3623d6c0bd238430, []int{4} } func (m *CustomResourceConversion) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -191,7 +191,7 @@ var xxx_messageInfo_CustomResourceConversion proto.InternalMessageInfo func (m *CustomResourceDefinition) Reset() { *m = 
CustomResourceDefinition{} } func (*CustomResourceDefinition) ProtoMessage() {} func (*CustomResourceDefinition) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{5} + return fileDescriptor_3623d6c0bd238430, []int{5} } func (m *CustomResourceDefinition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -219,7 +219,7 @@ var xxx_messageInfo_CustomResourceDefinition proto.InternalMessageInfo func (m *CustomResourceDefinitionCondition) Reset() { *m = CustomResourceDefinitionCondition{} } func (*CustomResourceDefinitionCondition) ProtoMessage() {} func (*CustomResourceDefinitionCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{6} + return fileDescriptor_3623d6c0bd238430, []int{6} } func (m *CustomResourceDefinitionCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -247,7 +247,7 @@ var xxx_messageInfo_CustomResourceDefinitionCondition proto.InternalMessageInfo func (m *CustomResourceDefinitionList) Reset() { *m = CustomResourceDefinitionList{} } func (*CustomResourceDefinitionList) ProtoMessage() {} func (*CustomResourceDefinitionList) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{7} + return fileDescriptor_3623d6c0bd238430, []int{7} } func (m *CustomResourceDefinitionList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -275,7 +275,7 @@ var xxx_messageInfo_CustomResourceDefinitionList proto.InternalMessageInfo func (m *CustomResourceDefinitionNames) Reset() { *m = CustomResourceDefinitionNames{} } func (*CustomResourceDefinitionNames) ProtoMessage() {} func (*CustomResourceDefinitionNames) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{8} + return fileDescriptor_3623d6c0bd238430, []int{8} } func (m *CustomResourceDefinitionNames) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -303,7 +303,7 @@ var xxx_messageInfo_CustomResourceDefinitionNames proto.InternalMessageInfo func (m *CustomResourceDefinitionSpec) Reset() { *m = CustomResourceDefinitionSpec{} } func (*CustomResourceDefinitionSpec) ProtoMessage() {} func (*CustomResourceDefinitionSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{9} + return fileDescriptor_3623d6c0bd238430, []int{9} } func (m *CustomResourceDefinitionSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -331,7 +331,7 @@ var xxx_messageInfo_CustomResourceDefinitionSpec proto.InternalMessageInfo func (m *CustomResourceDefinitionStatus) Reset() { *m = CustomResourceDefinitionStatus{} } func (*CustomResourceDefinitionStatus) ProtoMessage() {} func (*CustomResourceDefinitionStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{10} + return fileDescriptor_3623d6c0bd238430, []int{10} } func (m *CustomResourceDefinitionStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -359,7 +359,7 @@ var xxx_messageInfo_CustomResourceDefinitionStatus proto.InternalMessageInfo func (m *CustomResourceDefinitionVersion) Reset() { *m = CustomResourceDefinitionVersion{} } func (*CustomResourceDefinitionVersion) ProtoMessage() {} func (*CustomResourceDefinitionVersion) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{11} + return fileDescriptor_3623d6c0bd238430, []int{11} } func (m *CustomResourceDefinitionVersion) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -387,7 +387,7 @@ var xxx_messageInfo_CustomResourceDefinitionVersion proto.InternalMessageInfo func (m *CustomResourceSubresourceScale) 
Reset() { *m = CustomResourceSubresourceScale{} } func (*CustomResourceSubresourceScale) ProtoMessage() {} func (*CustomResourceSubresourceScale) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{12} + return fileDescriptor_3623d6c0bd238430, []int{12} } func (m *CustomResourceSubresourceScale) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -415,7 +415,7 @@ var xxx_messageInfo_CustomResourceSubresourceScale proto.InternalMessageInfo func (m *CustomResourceSubresourceStatus) Reset() { *m = CustomResourceSubresourceStatus{} } func (*CustomResourceSubresourceStatus) ProtoMessage() {} func (*CustomResourceSubresourceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{13} + return fileDescriptor_3623d6c0bd238430, []int{13} } func (m *CustomResourceSubresourceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -443,7 +443,7 @@ var xxx_messageInfo_CustomResourceSubresourceStatus proto.InternalMessageInfo func (m *CustomResourceSubresources) Reset() { *m = CustomResourceSubresources{} } func (*CustomResourceSubresources) ProtoMessage() {} func (*CustomResourceSubresources) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{14} + return fileDescriptor_3623d6c0bd238430, []int{14} } func (m *CustomResourceSubresources) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -471,7 +471,7 @@ var xxx_messageInfo_CustomResourceSubresources proto.InternalMessageInfo func (m *CustomResourceValidation) Reset() { *m = CustomResourceValidation{} } func (*CustomResourceValidation) ProtoMessage() {} func (*CustomResourceValidation) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{15} + return fileDescriptor_3623d6c0bd238430, []int{15} } func (m *CustomResourceValidation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -499,7 +499,7 @@ var xxx_messageInfo_CustomResourceValidation proto.InternalMessageInfo func (m *ExternalDocumentation) Reset() { *m = ExternalDocumentation{} } func (*ExternalDocumentation) ProtoMessage() {} func (*ExternalDocumentation) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{16} + return fileDescriptor_3623d6c0bd238430, []int{16} } func (m *ExternalDocumentation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -527,7 +527,7 @@ var xxx_messageInfo_ExternalDocumentation proto.InternalMessageInfo func (m *JSON) Reset() { *m = JSON{} } func (*JSON) ProtoMessage() {} func (*JSON) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{17} + return fileDescriptor_3623d6c0bd238430, []int{17} } func (m *JSON) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -555,7 +555,7 @@ var xxx_messageInfo_JSON proto.InternalMessageInfo func (m *JSONSchemaProps) Reset() { *m = JSONSchemaProps{} } func (*JSONSchemaProps) ProtoMessage() {} func (*JSONSchemaProps) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{18} + return fileDescriptor_3623d6c0bd238430, []int{18} } func (m *JSONSchemaProps) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -583,7 +583,7 @@ var xxx_messageInfo_JSONSchemaProps proto.InternalMessageInfo func (m *JSONSchemaPropsOrArray) Reset() { *m = JSONSchemaPropsOrArray{} } func (*JSONSchemaPropsOrArray) ProtoMessage() {} func (*JSONSchemaPropsOrArray) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{19} + return fileDescriptor_3623d6c0bd238430, []int{19} } func (m *JSONSchemaPropsOrArray) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -611,7 +611,7 @@ var xxx_messageInfo_JSONSchemaPropsOrArray proto.InternalMessageInfo func (m *JSONSchemaPropsOrBool) Reset() { *m = JSONSchemaPropsOrBool{} } func (*JSONSchemaPropsOrBool) ProtoMessage() {} func (*JSONSchemaPropsOrBool) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{20} + return fileDescriptor_3623d6c0bd238430, []int{20} } func (m *JSONSchemaPropsOrBool) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -639,7 +639,7 @@ var xxx_messageInfo_JSONSchemaPropsOrBool proto.InternalMessageInfo func (m *JSONSchemaPropsOrStringArray) Reset() { *m = JSONSchemaPropsOrStringArray{} } func (*JSONSchemaPropsOrStringArray) ProtoMessage() {} func (*JSONSchemaPropsOrStringArray) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{21} + return fileDescriptor_3623d6c0bd238430, []int{21} } func (m *JSONSchemaPropsOrStringArray) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -664,10 +664,38 @@ func (m *JSONSchemaPropsOrStringArray) XXX_DiscardUnknown() { var xxx_messageInfo_JSONSchemaPropsOrStringArray proto.InternalMessageInfo +func (m *SelectableField) Reset() { *m = SelectableField{} } +func (*SelectableField) ProtoMessage() {} +func (*SelectableField) Descriptor() ([]byte, []int) { + return fileDescriptor_3623d6c0bd238430, []int{22} +} +func (m *SelectableField) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelectableField) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelectableField) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelectableField.Merge(m, src) +} +func (m *SelectableField) XXX_Size() int { + return m.Size() +} +func (m *SelectableField) XXX_DiscardUnknown() { + xxx_messageInfo_SelectableField.DiscardUnknown(m) +} + +var xxx_messageInfo_SelectableField proto.InternalMessageInfo + func (m *ServiceReference) Reset() { *m = ServiceReference{} } func (*ServiceReference) ProtoMessage() {} func (*ServiceReference) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{22} + return fileDescriptor_3623d6c0bd238430, []int{23} } func (m *ServiceReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -695,7 +723,7 @@ var xxx_messageInfo_ServiceReference proto.InternalMessageInfo func (m *ValidationRule) Reset() { *m = ValidationRule{} } func (*ValidationRule) ProtoMessage() {} func (*ValidationRule) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{23} + return fileDescriptor_3623d6c0bd238430, []int{24} } func (m *ValidationRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -723,7 +751,7 @@ var xxx_messageInfo_ValidationRule proto.InternalMessageInfo func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } func (*WebhookClientConfig) ProtoMessage() {} func (*WebhookClientConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{24} + return fileDescriptor_3623d6c0bd238430, []int{25} } func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -775,216 +803,219 @@ func init() { proto.RegisterType((*JSONSchemaPropsOrArray)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaPropsOrArray") proto.RegisterType((*JSONSchemaPropsOrBool)(nil), 
"k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaPropsOrBool") proto.RegisterType((*JSONSchemaPropsOrStringArray)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaPropsOrStringArray") + proto.RegisterType((*SelectableField)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.SelectableField") proto.RegisterType((*ServiceReference)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.ServiceReference") proto.RegisterType((*ValidationRule)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.ValidationRule") proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.WebhookClientConfig") } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto", fileDescriptor_98a4cc6918394e53) -} - -var fileDescriptor_98a4cc6918394e53 = []byte{ - // 3170 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcd, 0x73, 0x1c, 0x47, - 0xd9, 0xf7, 0xec, 0x6a, 0xa5, 0x55, 0x4b, 0xb6, 0xa4, 0xb6, 0xa5, 0x8c, 0x15, 0x47, 0x2b, 0xaf, - 0xdf, 0xf8, 0x55, 0x12, 0x67, 0x95, 0xf8, 0x4d, 0xde, 0xe4, 0xcd, 0x4b, 0x8a, 0xd2, 0x4a, 0x72, - 0x50, 0x62, 0x59, 0xa2, 0xd7, 0x76, 0x04, 0xf9, 0x1c, 0xed, 0xf4, 0xae, 0x27, 0x9a, 0x9d, 0x19, - 0x77, 0xcf, 0xac, 0xa4, 0x0a, 0x50, 0x7c, 0x54, 0x0a, 0x8a, 0x02, 0x42, 0x91, 0x5c, 0x28, 0xe0, - 0x10, 0x28, 0x2e, 0x1c, 0xe0, 0x00, 0x37, 0xf8, 0x03, 0x72, 0x4c, 0x01, 0x87, 0x1c, 0xa8, 0x85, - 0x2c, 0x57, 0x8e, 0x54, 0x51, 0xa5, 0x13, 0xd5, 0x1f, 0xd3, 0xd3, 0x3b, 0xbb, 0x6b, 0xbb, 0xa2, - 0xdd, 0x98, 0xdb, 0xee, 0xf3, 0xf5, 0x7b, 0xe6, 0xe9, 0xa7, 0x9f, 0x7e, 0xfa, 0x99, 0x01, 0xb5, - 0xbd, 0x67, 0x69, 0xc9, 0xf1, 0x97, 0xf7, 0xa2, 0x5d, 0x4c, 0x3c, 0x1c, 0x62, 0xba, 0xdc, 0xc4, - 0x9e, 0xed, 0x93, 0x65, 0xc9, 0xb0, 0x02, 0x07, 0x1f, 0x84, 0xd8, 0xa3, 0x8e, 0xef, 0xd1, 0xc7, - 0xad, 0xc0, 0xa1, 0x98, 0x34, 0x31, 0x59, 0x0e, 0xf6, 0xea, 0x8c, 0x47, 0x3b, 0x05, 0x96, 0x9b, - 0x4f, 0xee, 0xe2, 0xd0, 0x7a, 0x72, 0xb9, 0x8e, 0x3d, 0x4c, 0xac, 0x10, 0xdb, 0xa5, 0x80, 0xf8, - 0xa1, 0x0f, 0x9f, 0x17, 0xe6, 0x4a, 0x1d, 0xd2, 0x6f, 0x28, 0x73, 0xa5, 0x60, 0xaf, 0xce, 0x78, - 0xb4, 0x53, 0xa0, 0x24, 0xcd, 0xcd, 0x3f, 0x5e, 0x77, 0xc2, 0x5b, 0xd1, 0x6e, 0xa9, 0xea, 0x37, - 0x96, 0xeb, 0x7e, 0xdd, 0x5f, 0xe6, 0x56, 0x77, 0xa3, 0x1a, 0xff, 0xc7, 0xff, 0xf0, 0x5f, 0x02, - 0x6d, 0xfe, 0xa9, 0xc4, 0xf9, 0x86, 0x55, 0xbd, 0xe5, 0x78, 0x98, 0x1c, 0x26, 0x1e, 0x37, 0x70, - 0x68, 0x2d, 0x37, 0xbb, 0x7c, 0x9c, 0x5f, 0xee, 0xa7, 0x45, 0x22, 0x2f, 0x74, 0x1a, 0xb8, 0x4b, - 0xe1, 0x7f, 0xef, 0xa6, 0x40, 0xab, 0xb7, 0x70, 0xc3, 0x4a, 0xeb, 0x15, 0x8f, 0x0c, 0x30, 0xb3, - 0xea, 0x7b, 0x4d, 0x4c, 0xd8, 0x53, 0x22, 0x7c, 0x3b, 0xc2, 0x34, 0x84, 0x65, 0x90, 0x8d, 0x1c, - 0xdb, 0x34, 0x16, 0x8d, 0xa5, 0xf1, 0xf2, 0x13, 0x1f, 0xb6, 0x0a, 0x27, 0xda, 0xad, 0x42, 0xf6, - 0xc6, 0xc6, 0xda, 0x51, 0xab, 0x70, 0xbe, 0x1f, 0x52, 0x78, 0x18, 0x60, 0x5a, 0xba, 0xb1, 0xb1, - 0x86, 0x98, 0x32, 0x7c, 0x01, 0xcc, 0xd8, 0x98, 0x3a, 0x04, 0xdb, 0x2b, 0xdb, 0x1b, 0x37, 0x85, - 0x7d, 0x33, 0xc3, 0x2d, 0x9e, 0x95, 0x16, 0x67, 0xd6, 0xd2, 0x02, 0xa8, 0x5b, 0x07, 0xee, 0x80, - 0x31, 0x7f, 0xf7, 0x2d, 0x5c, 0x0d, 0xa9, 0x99, 0x5d, 0xcc, 0x2e, 0x4d, 0x5c, 0x7e, 0xbc, 0x94, - 0xac, 0xa0, 0x72, 0x81, 0x2f, 0x9b, 0x7c, 0xd8, 0x12, 0xb2, 0xf6, 0xd7, 0xe3, 0x95, 0x2b, 0x4f, - 0x49, 0xb4, 0xb1, 0x2d, 0x61, 0x05, 0xc5, 0xe6, 0x8a, 0xbf, 0xc8, 0x00, 0xa8, 
0x3f, 0x3c, 0x0d, - 0x7c, 0x8f, 0xe2, 0x81, 0x3c, 0x3d, 0x05, 0xd3, 0x55, 0x6e, 0x39, 0xc4, 0xb6, 0xc4, 0x35, 0x33, - 0x9f, 0xc6, 0x7b, 0x53, 0xe2, 0x4f, 0xaf, 0xa6, 0xcc, 0xa1, 0x2e, 0x00, 0x78, 0x1d, 0x8c, 0x12, - 0x4c, 0x23, 0x37, 0x34, 0xb3, 0x8b, 0xc6, 0xd2, 0xc4, 0xe5, 0x4b, 0x7d, 0xa1, 0x78, 0x7e, 0xb3, - 0xe4, 0x2b, 0x35, 0x9f, 0x2c, 0x55, 0x42, 0x2b, 0x8c, 0x68, 0xf9, 0x94, 0x44, 0x1a, 0x45, 0xdc, - 0x06, 0x92, 0xb6, 0x8a, 0xdf, 0xc9, 0x80, 0x69, 0x3d, 0x4a, 0x4d, 0x07, 0xef, 0xc3, 0x7d, 0x30, - 0x46, 0x44, 0xb2, 0xf0, 0x38, 0x4d, 0x5c, 0xde, 0x2e, 0x1d, 0x6b, 0x5b, 0x95, 0xba, 0x92, 0xb0, - 0x3c, 0xc1, 0xd6, 0x4c, 0xfe, 0x41, 0x31, 0x1a, 0x7c, 0x1b, 0xe4, 0x89, 0x5c, 0x28, 0x9e, 0x4d, - 0x13, 0x97, 0xbf, 0x38, 0x40, 0x64, 0x61, 0xb8, 0x3c, 0xd9, 0x6e, 0x15, 0xf2, 0xf1, 0x3f, 0xa4, - 0x00, 0x8b, 0xef, 0x65, 0xc0, 0xc2, 0x6a, 0x44, 0x43, 0xbf, 0x81, 0x30, 0xf5, 0x23, 0x52, 0xc5, - 0xab, 0xbe, 0x1b, 0x35, 0xbc, 0x35, 0x5c, 0x73, 0x3c, 0x27, 0x64, 0xd9, 0xba, 0x08, 0x46, 0x3c, - 0xab, 0x81, 0x65, 0xf6, 0x4c, 0xca, 0x98, 0x8e, 0x5c, 0xb3, 0x1a, 0x18, 0x71, 0x0e, 0x93, 0x60, - 0xc9, 0x22, 0xf7, 0x82, 0x92, 0xb8, 0x7e, 0x18, 0x60, 0xc4, 0x39, 0xf0, 0x22, 0x18, 0xad, 0xf9, - 0xa4, 0x61, 0x89, 0x75, 0x1c, 0x4f, 0x56, 0xe6, 0x0a, 0xa7, 0x22, 0xc9, 0x85, 0x4f, 0x83, 0x09, - 0x1b, 0xd3, 0x2a, 0x71, 0x02, 0x06, 0x6d, 0x8e, 0x70, 0xe1, 0xd3, 0x52, 0x78, 0x62, 0x2d, 0x61, - 0x21, 0x5d, 0x0e, 0x5e, 0x02, 0xf9, 0x80, 0x38, 0x3e, 0x71, 0xc2, 0x43, 0x33, 0xb7, 0x68, 0x2c, - 0xe5, 0xca, 0xd3, 0x52, 0x27, 0xbf, 0x2d, 0xe9, 0x48, 0x49, 0xc0, 0x45, 0x90, 0x7f, 0xb1, 0xb2, - 0x75, 0x6d, 0xdb, 0x0a, 0x6f, 0x99, 0xa3, 0x1c, 0x61, 0x84, 0x49, 0x23, 0x45, 0x2d, 0xfe, 0x25, - 0x03, 0xcc, 0x74, 0x54, 0xe2, 0x90, 0xc2, 0x2b, 0x20, 0x4f, 0x43, 0x56, 0x71, 0xea, 0x87, 0x32, - 0x26, 0x8f, 0xc6, 0x60, 0x15, 0x49, 0x3f, 0x6a, 0x15, 0xe6, 0x12, 0x8d, 0x98, 0xca, 0xe3, 0xa1, - 0x74, 0xe1, 0xcf, 0x0c, 0x70, 0x7a, 0x1f, 0xef, 0xde, 0xf2, 0xfd, 0xbd, 0x55, 0xd7, 0xc1, 0x5e, - 0xb8, 0xea, 0x7b, 0x35, 0xa7, 0x2e, 0x73, 0x00, 0x1d, 0x33, 0x07, 0x5e, 0xee, 0xb6, 0x5c, 0x7e, - 0xa0, 0xdd, 0x2a, 0x9c, 0xee, 0xc1, 0x40, 0xbd, 0xfc, 0x80, 0x3b, 0xc0, 0xac, 0xa6, 0x36, 0x89, - 0x2c, 0x60, 0xa2, 0x6c, 0x8d, 0x97, 0xcf, 0xb5, 0x5b, 0x05, 0x73, 0xb5, 0x8f, 0x0c, 0xea, 0xab, - 0x5d, 0xfc, 0x56, 0x36, 0x1d, 0x5e, 0x2d, 0xdd, 0xde, 0x04, 0x79, 0xb6, 0x8d, 0x6d, 0x2b, 0xb4, - 0xe4, 0x46, 0x7c, 0xe2, 0xde, 0x36, 0xbd, 0xa8, 0x19, 0x9b, 0x38, 0xb4, 0xca, 0x50, 0x2e, 0x08, - 0x48, 0x68, 0x48, 0x59, 0x85, 0x5f, 0x05, 0x23, 0x34, 0xc0, 0x55, 0x19, 0xe8, 0x57, 0x8e, 0xbb, - 0xd9, 0xfa, 0x3c, 0x48, 0x25, 0xc0, 0xd5, 0x64, 0x2f, 0xb0, 0x7f, 0x88, 0xc3, 0xc2, 0x77, 0x0c, - 0x30, 0x4a, 0x79, 0x81, 0x92, 0x45, 0xed, 0xb5, 0x61, 0x79, 0x90, 0xaa, 0x82, 0xe2, 0x3f, 0x92, - 0xe0, 0xc5, 0x7f, 0x66, 0xc0, 0xf9, 0x7e, 0xaa, 0xab, 0xbe, 0x67, 0x8b, 0xe5, 0xd8, 0x90, 0x7b, - 0x5b, 0x64, 0xfa, 0xd3, 0xfa, 0xde, 0x3e, 0x6a, 0x15, 0x1e, 0xbe, 0xab, 0x01, 0xad, 0x08, 0xfc, - 0x9f, 0x7a, 0x6e, 0x51, 0x28, 0xce, 0x77, 0x3a, 0x76, 0xd4, 0x2a, 0x4c, 0x29, 0xb5, 0x4e, 0x5f, - 0x61, 0x13, 0x40, 0xd7, 0xa2, 0xe1, 0x75, 0x62, 0x79, 0x54, 0x98, 0x75, 0x1a, 0x58, 0x86, 0xef, - 0xd1, 0x7b, 0x4b, 0x0f, 0xa6, 0x51, 0x9e, 0x97, 0x90, 0xf0, 0x6a, 0x97, 0x35, 0xd4, 0x03, 0x81, - 0xd5, 0x2d, 0x82, 0x2d, 0xaa, 0x4a, 0x91, 0x76, 0xa2, 0x30, 0x2a, 0x92, 0x5c, 0xf8, 0x08, 0x18, - 0x6b, 0x60, 0x4a, 0xad, 0x3a, 0xe6, 0xf5, 0x67, 0x3c, 0x39, 0xa2, 0x37, 0x05, 0x19, 0xc5, 0x7c, - 0xd6, 0x9f, 0x9c, 0xeb, 0x17, 0xb5, 0xab, 0x0e, 0x0d, 0xe1, 0xab, 0x5d, 0x1b, 0xa0, 0x74, 0x6f, - 0x4f, 
0xc8, 0xb4, 0x79, 0xfa, 0xab, 0xe2, 0x17, 0x53, 0xb4, 0xe4, 0xff, 0x0a, 0xc8, 0x39, 0x21, - 0x6e, 0xc4, 0x67, 0xf7, 0xcb, 0x43, 0xca, 0xbd, 0xf2, 0x49, 0xe9, 0x43, 0x6e, 0x83, 0xa1, 0x21, - 0x01, 0x5a, 0xfc, 0x65, 0x06, 0x3c, 0xd4, 0x4f, 0x85, 0x1d, 0x28, 0x94, 0x45, 0x3c, 0x70, 0x23, - 0x62, 0xb9, 0x32, 0xe3, 0x54, 0xc4, 0xb7, 0x39, 0x15, 0x49, 0x2e, 0x2b, 0xf9, 0xd4, 0xf1, 0xea, - 0x91, 0x6b, 0x11, 0x99, 0x4e, 0xea, 0xa9, 0x2b, 0x92, 0x8e, 0x94, 0x04, 0x2c, 0x01, 0x40, 0x6f, - 0xf9, 0x24, 0xe4, 0x18, 0xb2, 0x7a, 0x9d, 0x62, 0x05, 0xa2, 0xa2, 0xa8, 0x48, 0x93, 0x60, 0x27, - 0xda, 0x9e, 0xe3, 0xd9, 0x72, 0xd5, 0xd5, 0x2e, 0x7e, 0xc9, 0xf1, 0x6c, 0xc4, 0x39, 0x0c, 0xdf, - 0x75, 0x68, 0xc8, 0x28, 0x72, 0xc9, 0x3b, 0xa2, 0xce, 0x25, 0x95, 0x04, 0xc3, 0xaf, 0xb2, 0xaa, - 0xef, 0x13, 0x07, 0x53, 0x73, 0x34, 0xc1, 0x5f, 0x55, 0x54, 0xa4, 0x49, 0x14, 0xff, 0x91, 0xef, - 0x9f, 0x24, 0xac, 0x94, 0xc0, 0x0b, 0x20, 0x57, 0x27, 0x7e, 0x14, 0xc8, 0x28, 0xa9, 0x68, 0xbf, - 0xc0, 0x88, 0x48, 0xf0, 0x58, 0x56, 0x36, 0x3b, 0xda, 0x54, 0x95, 0x95, 0x71, 0x73, 0x1a, 0xf3, - 0xe1, 0x37, 0x0c, 0x90, 0xf3, 0x64, 0x70, 0x58, 0xca, 0xbd, 0x3a, 0xa4, 0xbc, 0xe0, 0xe1, 0x4d, - 0xdc, 0x15, 0x91, 0x17, 0xc8, 0xf0, 0x29, 0x90, 0xa3, 0x55, 0x3f, 0xc0, 0x32, 0xea, 0x0b, 0xb1, - 0x50, 0x85, 0x11, 0x8f, 0x5a, 0x85, 0x93, 0xb1, 0x39, 0x4e, 0x40, 0x42, 0x18, 0x7e, 0xdb, 0x00, - 0xa0, 0x69, 0xb9, 0x8e, 0x6d, 0xf1, 0x96, 0x21, 0xc7, 0xdd, 0x1f, 0x6c, 0x5a, 0xdf, 0x54, 0xe6, - 0xc5, 0xa2, 0x25, 0xff, 0x91, 0x06, 0x0d, 0xdf, 0x35, 0xc0, 0x24, 0x8d, 0x76, 0x89, 0xd4, 0xa2, - 0xbc, 0xb9, 0x98, 0xb8, 0xfc, 0xa5, 0x81, 0xfa, 0x52, 0xd1, 0x00, 0xca, 0xd3, 0xed, 0x56, 0x61, - 0x52, 0xa7, 0xa0, 0x0e, 0x07, 0xe0, 0xf7, 0x0c, 0x90, 0x6f, 0xc6, 0x67, 0xf6, 0x18, 0xdf, 0xf0, - 0xaf, 0x0f, 0x69, 0x61, 0x65, 0x46, 0x25, 0xbb, 0x40, 0xf5, 0x01, 0xca, 0x03, 0xf8, 0x7b, 0x03, - 0x98, 0x96, 0x2d, 0x0a, 0xbc, 0xe5, 0x6e, 0x13, 0xc7, 0x0b, 0x31, 0x11, 0xfd, 0x26, 0x35, 0xf3, - 0xdc, 0xbd, 0xc1, 0x9e, 0x85, 0xe9, 0x5e, 0xb6, 0xbc, 0x28, 0xbd, 0x33, 0x57, 0xfa, 0xb8, 0x81, - 0xfa, 0x3a, 0xc8, 0x13, 0x2d, 0x69, 0x69, 0xcc, 0xf1, 0x21, 0x24, 0x5a, 0xd2, 0x4b, 0xc9, 0xea, - 0x90, 0x74, 0x50, 0x1a, 0x34, 0xdc, 0x02, 0xb3, 0x01, 0xc1, 0x1c, 0xe0, 0x86, 0xb7, 0xe7, 0xf9, - 0xfb, 0xde, 0x15, 0x07, 0xbb, 0x36, 0x35, 0xc1, 0xa2, 0xb1, 0x94, 0x2f, 0x9f, 0x6d, 0xb7, 0x0a, - 0xb3, 0xdb, 0xbd, 0x04, 0x50, 0x6f, 0xbd, 0xe2, 0xbb, 0xd9, 0xf4, 0x2d, 0x20, 0xdd, 0x45, 0xc0, - 0xf7, 0xc5, 0xd3, 0x8b, 0xd8, 0x50, 0xd3, 0xe0, 0xab, 0xf5, 0xe6, 0x90, 0x92, 0x49, 0xb5, 0x01, - 0x49, 0x27, 0xa7, 0x48, 0x14, 0x69, 0x7e, 0xc0, 0x1f, 0x1b, 0xe0, 0xa4, 0x55, 0xad, 0xe2, 0x20, - 0xc4, 0xb6, 0x28, 0xee, 0x99, 0xcf, 0xa0, 0x7e, 0xcd, 0x4a, 0xaf, 0x4e, 0xae, 0xe8, 0xd0, 0xa8, - 0xd3, 0x13, 0xf8, 0x1c, 0x38, 0x45, 0x43, 0x9f, 0x60, 0x3b, 0xd5, 0x36, 0xc3, 0x76, 0xab, 0x70, - 0xaa, 0xd2, 0xc1, 0x41, 0x29, 0xc9, 0xe2, 0x5f, 0x73, 0xa0, 0x70, 0x97, 0xad, 0x76, 0x0f, 0x17, - 0xb3, 0x8b, 0x60, 0x94, 0x3f, 0xae, 0xcd, 0xa3, 0x92, 0xd7, 0x5a, 0x41, 0x4e, 0x45, 0x92, 0xcb, - 0x0e, 0x0a, 0x86, 0xcf, 0xda, 0x97, 0x2c, 0x17, 0x54, 0x07, 0x45, 0x45, 0x90, 0x51, 0xcc, 0x87, - 0x97, 0x01, 0xb0, 0x71, 0x40, 0x30, 0x3b, 0xac, 0x6c, 0x73, 0x8c, 0x4b, 0xab, 0x45, 0x5a, 0x53, - 0x1c, 0xa4, 0x49, 0xc1, 0x2b, 0x00, 0xc6, 0xff, 0x1c, 0xdf, 0x7b, 0xd9, 0x22, 0x9e, 0xe3, 0xd5, - 0xcd, 0x3c, 0x77, 0x7b, 0x8e, 0x75, 0x63, 0x6b, 0x5d, 0x5c, 0xd4, 0x43, 0x03, 0xbe, 0x0d, 0x46, - 0xc5, 0xd0, 0x87, 0x9f, 0x10, 0x43, 0xac, 0xf2, 0x80, 0xc7, 0x88, 0x43, 0x21, 0x09, 0xd9, 0x5d, - 0xdd, 0x73, 0xf7, 0xbb, 0xba, 
0xdf, 0xb1, 0x9c, 0x8e, 0xfe, 0x87, 0x97, 0xd3, 0xe2, 0xbf, 0x8c, - 0x74, 0xcd, 0xd1, 0x1e, 0xb5, 0x52, 0xb5, 0x5c, 0x0c, 0xd7, 0xc0, 0x34, 0xbb, 0x31, 0x21, 0x1c, - 0xb8, 0x4e, 0xd5, 0xa2, 0xfc, 0xc2, 0x2e, 0x92, 0x5d, 0xcd, 0x90, 0x2a, 0x29, 0x3e, 0xea, 0xd2, - 0x80, 0x2f, 0x02, 0x28, 0x6e, 0x11, 0x1d, 0x76, 0x44, 0x43, 0xa4, 0xee, 0x03, 0x95, 0x2e, 0x09, - 0xd4, 0x43, 0x0b, 0xae, 0x82, 0x19, 0xd7, 0xda, 0xc5, 0x6e, 0x05, 0xbb, 0xb8, 0x1a, 0xfa, 0x84, - 0x9b, 0x12, 0x23, 0x8d, 0xd9, 0x76, 0xab, 0x30, 0x73, 0x35, 0xcd, 0x44, 0xdd, 0xf2, 0xc5, 0xf3, - 0xe9, 0xad, 0xad, 0x3f, 0xb8, 0xb8, 0x9b, 0x7d, 0x90, 0x01, 0xf3, 0xfd, 0x33, 0x03, 0x7e, 0x33, - 0xb9, 0x42, 0x8a, 0x1b, 0xc2, 0xeb, 0xc3, 0xca, 0x42, 0x79, 0x87, 0x04, 0xdd, 0xf7, 0x47, 0xf8, - 0x35, 0xd6, 0xae, 0x59, 0x6e, 0x3c, 0xb4, 0x7a, 0x6d, 0x68, 0x2e, 0x30, 0x90, 0xf2, 0xb8, 0xe8, - 0x04, 0x2d, 0x97, 0x37, 0x7e, 0x96, 0x8b, 0x8b, 0xbf, 0x32, 0xd2, 0x53, 0x84, 0x64, 0x07, 0xc3, - 0xef, 0x1b, 0x60, 0xca, 0x0f, 0xb0, 0xb7, 0xb2, 0xbd, 0x71, 0xf3, 0x7f, 0xc4, 0x4e, 0x96, 0xa1, - 0xba, 0x76, 0x4c, 0x3f, 0x5f, 0xac, 0x6c, 0x5d, 0x13, 0x06, 0xb7, 0x89, 0x1f, 0xd0, 0xf2, 0xe9, - 0x76, 0xab, 0x30, 0xb5, 0xd5, 0x09, 0x85, 0xd2, 0xd8, 0xc5, 0x06, 0x98, 0x5d, 0x3f, 0x08, 0x31, - 0xf1, 0x2c, 0x77, 0xcd, 0xaf, 0x46, 0x0d, 0xec, 0x85, 0xc2, 0xd1, 0xd4, 0xc4, 0xcb, 0xb8, 0xc7, - 0x89, 0xd7, 0x43, 0x20, 0x1b, 0x11, 0x57, 0x66, 0xf1, 0x84, 0x9a, 0xe8, 0xa2, 0xab, 0x88, 0xd1, - 0x8b, 0xe7, 0xc1, 0x08, 0xf3, 0x13, 0x9e, 0x05, 0x59, 0x62, 0xed, 0x73, 0xab, 0x93, 0xe5, 0x31, - 0x26, 0x82, 0xac, 0x7d, 0xc4, 0x68, 0xc5, 0x3f, 0x9f, 0x07, 0x53, 0xa9, 0x67, 0x81, 0xf3, 0x20, - 0xa3, 0xc6, 0xc4, 0x40, 0x1a, 0xcd, 0x6c, 0xac, 0xa1, 0x8c, 0x63, 0xc3, 0x67, 0x54, 0xf1, 0x15, - 0xa0, 0x05, 0x75, 0x96, 0x70, 0x2a, 0xeb, 0xcf, 0x13, 0x73, 0xcc, 0x91, 0xb8, 0x70, 0x32, 0x1f, - 0x70, 0x4d, 0xee, 0x12, 0xe1, 0x03, 0xae, 0x21, 0x46, 0xfb, 0xb4, 0xe3, 0xbe, 0x78, 0xde, 0x98, - 0xbb, 0x87, 0x79, 0xe3, 0xe8, 0x1d, 0xe7, 0x8d, 0x17, 0x40, 0x2e, 0x74, 0x42, 0x17, 0xf3, 0x83, - 0x4c, 0xbb, 0x46, 0x5d, 0x67, 0x44, 0x24, 0x78, 0xf0, 0x2d, 0x30, 0x66, 0xe3, 0x9a, 0x15, 0xb9, - 0x21, 0x3f, 0xb3, 0x26, 0x2e, 0xaf, 0x0e, 0x20, 0x85, 0xc4, 0x30, 0x78, 0x4d, 0xd8, 0x45, 0x31, - 0x00, 0x7c, 0x18, 0x8c, 0x35, 0xac, 0x03, 0xa7, 0x11, 0x35, 0x78, 0x83, 0x69, 0x08, 0xb1, 0x4d, - 0x41, 0x42, 0x31, 0x8f, 0x55, 0x46, 0x7c, 0x50, 0x75, 0x23, 0xea, 0x34, 0xb1, 0x64, 0xca, 0xe6, - 0x4f, 0x55, 0xc6, 0xf5, 0x14, 0x1f, 0x75, 0x69, 0x70, 0x30, 0xc7, 0xe3, 0xca, 0x13, 0x1a, 0x98, - 0x20, 0xa1, 0x98, 0xd7, 0x09, 0x26, 0xe5, 0x27, 0xfb, 0x81, 0x49, 0xe5, 0x2e, 0x0d, 0xf8, 0x18, - 0x18, 0x6f, 0x58, 0x07, 0x57, 0xb1, 0x57, 0x0f, 0x6f, 0x99, 0x27, 0x17, 0x8d, 0xa5, 0x6c, 0xf9, - 0x64, 0xbb, 0x55, 0x18, 0xdf, 0x8c, 0x89, 0x28, 0xe1, 0x73, 0x61, 0xc7, 0x93, 0xc2, 0xa7, 0x34, - 0xe1, 0x98, 0x88, 0x12, 0x3e, 0xeb, 0x5e, 0x02, 0x2b, 0x64, 0x9b, 0xcb, 0x9c, 0xea, 0xbc, 0xe6, - 0x6e, 0x0b, 0x32, 0x8a, 0xf9, 0x70, 0x09, 0xe4, 0x1b, 0xd6, 0x01, 0x1f, 0x49, 0x98, 0xd3, 0xdc, - 0x2c, 0x1f, 0x8c, 0x6f, 0x4a, 0x1a, 0x52, 0x5c, 0x2e, 0xe9, 0x78, 0x42, 0x72, 0x46, 0x93, 0x94, - 0x34, 0xa4, 0xb8, 0x2c, 0x89, 0x23, 0xcf, 0xb9, 0x1d, 0x61, 0x21, 0x0c, 0x79, 0x64, 0x54, 0x12, - 0xdf, 0x48, 0x58, 0x48, 0x97, 0x83, 0x25, 0x00, 0x1a, 0x91, 0x1b, 0x3a, 0x81, 0x8b, 0xb7, 0x6a, - 0xe6, 0x69, 0x1e, 0x7f, 0xde, 0xf4, 0x6f, 0x2a, 0x2a, 0xd2, 0x24, 0x20, 0x06, 0x23, 0xd8, 0x8b, - 0x1a, 0xe6, 0x19, 0x7e, 0xb0, 0x0f, 0x24, 0x05, 0xd5, 0xce, 0x59, 0xf7, 0xa2, 0x06, 0xe2, 0xe6, - 0xe1, 0x33, 0xe0, 0x64, 0xc3, 0x3a, 0x60, 0xe5, 0x00, 
0x93, 0xd0, 0xc1, 0xd4, 0x9c, 0xe5, 0x0f, - 0x3f, 0xc3, 0xba, 0xdd, 0x4d, 0x9d, 0x81, 0x3a, 0xe5, 0xb8, 0xa2, 0xe3, 0x69, 0x8a, 0x73, 0x9a, - 0xa2, 0xce, 0x40, 0x9d, 0x72, 0x2c, 0xd2, 0x04, 0xdf, 0x8e, 0x1c, 0x82, 0x6d, 0xf3, 0x01, 0xde, - 0x20, 0xcb, 0x97, 0x15, 0x82, 0x86, 0x14, 0x17, 0x36, 0xe3, 0xd9, 0x95, 0xc9, 0xb7, 0xe1, 0x8d, - 0xc1, 0x56, 0xf2, 0x2d, 0xb2, 0x42, 0x88, 0x75, 0x28, 0x4e, 0x1a, 0x7d, 0x6a, 0x05, 0x29, 0xc8, - 0x59, 0xae, 0xbb, 0x55, 0x33, 0xcf, 0xf2, 0xd8, 0x0f, 0xfa, 0x04, 0x51, 0x55, 0x67, 0x85, 0x81, - 0x20, 0x81, 0xc5, 0x40, 0x7d, 0x8f, 0xa5, 0xc6, 0xfc, 0x70, 0x41, 0xb7, 0x18, 0x08, 0x12, 0x58, - 0xfc, 0x49, 0xbd, 0xc3, 0xad, 0x9a, 0xf9, 0xe0, 0x90, 0x9f, 0x94, 0x81, 0x20, 0x81, 0x05, 0x1d, - 0x90, 0xf5, 0xfc, 0xd0, 0x3c, 0x37, 0x94, 0xe3, 0x99, 0x1f, 0x38, 0xd7, 0xfc, 0x10, 0x31, 0x0c, - 0xf8, 0x23, 0x03, 0x80, 0x20, 0x49, 0xd1, 0x87, 0x06, 0x32, 0x12, 0x49, 0x41, 0x96, 0x92, 0xdc, - 0x5e, 0xf7, 0x42, 0x72, 0x98, 0x5c, 0x8f, 0xb4, 0x3d, 0xa0, 0x79, 0x01, 0x7f, 0x6e, 0x80, 0x33, - 0x7a, 0x9b, 0xac, 0xdc, 0x5b, 0xe0, 0x11, 0xb9, 0x3e, 0xe8, 0x34, 0x2f, 0xfb, 0xbe, 0x5b, 0x36, - 0xdb, 0xad, 0xc2, 0x99, 0x95, 0x1e, 0xa8, 0xa8, 0xa7, 0x2f, 0xf0, 0xd7, 0x06, 0x98, 0x91, 0x55, - 0x54, 0xf3, 0xb0, 0xc0, 0x03, 0x88, 0x07, 0x1d, 0xc0, 0x34, 0x8e, 0x88, 0xa3, 0x7a, 0xc9, 0xde, - 0xc5, 0x47, 0xdd, 0xae, 0xc1, 0xdf, 0x19, 0x60, 0xd2, 0xc6, 0x01, 0xf6, 0x6c, 0xec, 0x55, 0x99, - 0xaf, 0x8b, 0x03, 0x19, 0x59, 0xa4, 0x7d, 0x5d, 0xd3, 0x20, 0x84, 0x9b, 0x25, 0xe9, 0xe6, 0xa4, - 0xce, 0x3a, 0x6a, 0x15, 0xe6, 0x12, 0x55, 0x9d, 0x83, 0x3a, 0xbc, 0x84, 0xef, 0x19, 0x60, 0x2a, - 0x59, 0x00, 0x71, 0xa4, 0x9c, 0x1f, 0x62, 0x1e, 0xf0, 0xf6, 0x75, 0xa5, 0x13, 0x10, 0xa5, 0x3d, - 0x80, 0xbf, 0x31, 0x58, 0xa7, 0x16, 0xdf, 0xfb, 0xa8, 0x59, 0xe4, 0xb1, 0x7c, 0x63, 0xe0, 0xb1, - 0x54, 0x08, 0x22, 0x94, 0x97, 0x92, 0x56, 0x50, 0x71, 0x8e, 0x5a, 0x85, 0x59, 0x3d, 0x92, 0x8a, - 0x81, 0x74, 0x0f, 0xe1, 0x77, 0x0d, 0x30, 0x89, 0x93, 0x8e, 0x9b, 0x9a, 0x17, 0x06, 0x12, 0xc4, - 0x9e, 0x4d, 0xbc, 0xb8, 0xa9, 0x6b, 0x2c, 0x8a, 0x3a, 0xb0, 0x59, 0x07, 0x89, 0x0f, 0xac, 0x46, - 0xe0, 0x62, 0xf3, 0xbf, 0x06, 0xdc, 0x41, 0xae, 0x0b, 0xbb, 0x28, 0x06, 0x80, 0x97, 0x40, 0xde, - 0x8b, 0x5c, 0xd7, 0xda, 0x75, 0xb1, 0xf9, 0x30, 0xef, 0x45, 0xd4, 0x48, 0xf6, 0x9a, 0xa4, 0x23, - 0x25, 0x01, 0x6b, 0x60, 0xf1, 0xe0, 0x25, 0xf5, 0x79, 0x52, 0xcf, 0xa1, 0xa1, 0x79, 0x91, 0x5b, - 0x99, 0x6f, 0xb7, 0x0a, 0x73, 0x3b, 0xbd, 0xc7, 0x8a, 0x77, 0xb5, 0x01, 0x5f, 0x01, 0x0f, 0x6a, - 0x32, 0xeb, 0x8d, 0x5d, 0x6c, 0xdb, 0xd8, 0x8e, 0x2f, 0x6e, 0xe6, 0x7f, 0x8b, 0xc1, 0x65, 0xbc, - 0xc1, 0x77, 0xd2, 0x02, 0xe8, 0x4e, 0xda, 0xf0, 0x2a, 0x98, 0xd3, 0xd8, 0x1b, 0x5e, 0xb8, 0x45, - 0x2a, 0x21, 0x71, 0xbc, 0xba, 0xb9, 0xc4, 0xed, 0x9e, 0x89, 0x77, 0xe4, 0x8e, 0xc6, 0x43, 0x7d, - 0x74, 0xe0, 0x17, 0x3a, 0xac, 0xf1, 0x57, 0x68, 0x56, 0xf0, 0x12, 0x3e, 0xa4, 0xe6, 0x23, 0xbc, - 0x3b, 0xe1, 0x8b, 0xbd, 0xa3, 0xd1, 0x51, 0x1f, 0x79, 0xf8, 0x79, 0x70, 0x3a, 0xc5, 0x61, 0x57, - 0x14, 0xf3, 0x51, 0x71, 0xd7, 0x60, 0xfd, 0xec, 0x4e, 0x4c, 0x44, 0xbd, 0x24, 0xe1, 0xe7, 0x00, - 0xd4, 0xc8, 0x9b, 0x56, 0xc0, 0xf5, 0x1f, 0x13, 0xd7, 0x1e, 0xb6, 0xa2, 0x3b, 0x92, 0x86, 0x7a, - 0xc8, 0xc1, 0x9f, 0x18, 0x1d, 0x4f, 0x92, 0xdc, 0x8e, 0xa9, 0x79, 0x89, 0xef, 0xdf, 0xcd, 0x63, - 0x66, 0xa1, 0xf6, 0x1e, 0x24, 0x72, 0xb1, 0x16, 0x66, 0x0d, 0x0a, 0xf5, 0x71, 0x61, 0x9e, 0xdd, - 0xd0, 0x53, 0x15, 0x1e, 0x4e, 0x83, 0xec, 0x1e, 0x96, 0x5f, 0x55, 0x20, 0xf6, 0x13, 0xda, 0x20, - 0xd7, 0xb4, 0xdc, 0x28, 0x1e, 0x32, 0x0c, 0xb8, 0x3b, 0x40, 0xc2, 0xf8, 0x73, 
0x99, 0x67, 0x8d, - 0xf9, 0xf7, 0x0d, 0x30, 0xd7, 0xfb, 0xe0, 0xb9, 0xaf, 0x6e, 0xfd, 0xd4, 0x00, 0x33, 0x5d, 0x67, - 0x4c, 0x0f, 0x8f, 0x6e, 0x77, 0x7a, 0xf4, 0xca, 0xa0, 0x0f, 0x0b, 0xb1, 0x39, 0x78, 0x87, 0xac, - 0xbb, 0xf7, 0x03, 0x03, 0x4c, 0xa7, 0xcb, 0xf6, 0xfd, 0x8c, 0x57, 0xf1, 0xfd, 0x0c, 0x98, 0xeb, - 0xdd, 0xd8, 0x43, 0xa2, 0x26, 0x18, 0xc3, 0x99, 0x04, 0xf5, 0x9a, 0x1a, 0xbf, 0x63, 0x80, 0x89, - 0xb7, 0x94, 0x5c, 0xfc, 0xd6, 0x7d, 0xe0, 0x33, 0xa8, 0xf8, 0x9c, 0x4c, 0x18, 0x14, 0xe9, 0xb8, - 0xc5, 0xdf, 0x1a, 0x60, 0xb6, 0x67, 0x03, 0x00, 0x2f, 0x82, 0x51, 0xcb, 0x75, 0xfd, 0x7d, 0x31, - 0x4a, 0xd4, 0xde, 0x11, 0xac, 0x70, 0x2a, 0x92, 0x5c, 0x2d, 0x7a, 0x99, 0xcf, 0x2a, 0x7a, 0xc5, - 0x3f, 0x18, 0xe0, 0xdc, 0x9d, 0x32, 0xf1, 0xbe, 0x2c, 0xe9, 0x12, 0xc8, 0xcb, 0xe6, 0xfd, 0x90, - 0x2f, 0xa7, 0x2c, 0xc5, 0xb2, 0x68, 0xf0, 0x0f, 0xcd, 0xc4, 0xaf, 0xe2, 0x07, 0x06, 0x98, 0xae, - 0x60, 0xd2, 0x74, 0xaa, 0x18, 0xe1, 0x1a, 0x26, 0xd8, 0xab, 0x62, 0xb8, 0x0c, 0xc6, 0xf9, 0xeb, - 0xee, 0xc0, 0xaa, 0xc6, 0xaf, 0x6e, 0x66, 0x64, 0xc8, 0xc7, 0xaf, 0xc5, 0x0c, 0x94, 0xc8, 0xa8, - 0xd7, 0x3c, 0x99, 0xbe, 0xaf, 0x79, 0xce, 0x81, 0x91, 0x20, 0x19, 0x44, 0xe7, 0x19, 0x97, 0xcf, - 0x9e, 0x39, 0x95, 0x73, 0x7d, 0x12, 0xf2, 0xe9, 0x5a, 0x4e, 0x72, 0x7d, 0x12, 0x22, 0x4e, 0x2d, - 0xfe, 0x29, 0x03, 0x4e, 0x75, 0xd6, 0x71, 0x06, 0x48, 0x22, 0xb7, 0xeb, 0xbd, 0x12, 0xe3, 0x21, - 0xce, 0xd1, 0x3f, 0x77, 0xc9, 0xdc, 0xf9, 0x73, 0x17, 0xf8, 0x02, 0x98, 0x91, 0x3f, 0xd7, 0x0f, - 0x02, 0x82, 0x29, 0x7f, 0x77, 0x9a, 0xed, 0xfc, 0x68, 0x76, 0x33, 0x2d, 0x80, 0xba, 0x75, 0xe0, - 0xff, 0xa7, 0x3e, 0xc5, 0xb9, 0x90, 0x7c, 0x86, 0xc3, 0x5a, 0x42, 0xde, 0x67, 0xdc, 0x64, 0x65, - 0x60, 0x9d, 0x10, 0x9f, 0xa4, 0xbe, 0xcf, 0x59, 0x06, 0xe3, 0x35, 0x26, 0xc0, 0xe7, 0xf5, 0xb9, - 0xce, 0xa0, 0x5f, 0x89, 0x19, 0x28, 0x91, 0x81, 0xcf, 0x83, 0x29, 0x3f, 0x10, 0x1d, 0xf0, 0x96, - 0x6b, 0x57, 0xb0, 0x5b, 0xe3, 0x93, 0xc4, 0x7c, 0x3c, 0xee, 0xed, 0x60, 0xa1, 0xb4, 0x6c, 0xf1, - 0x8f, 0x06, 0xe8, 0xf5, 0xa1, 0x1d, 0x3c, 0x2b, 0xc6, 0xb6, 0xda, 0x2c, 0x34, 0x1e, 0xd9, 0xc2, - 0x26, 0x18, 0xa3, 0x22, 0x57, 0x64, 0x2e, 0x6f, 0x1d, 0x33, 0x97, 0xd3, 0x99, 0x27, 0xfa, 0xc5, - 0x98, 0x1a, 0x83, 0xb1, 0x74, 0xae, 0x5a, 0xe5, 0xc8, 0xb3, 0xe5, 0x24, 0x7f, 0x52, 0xa4, 0xf3, - 0xea, 0x8a, 0xa0, 0x21, 0xc5, 0x2d, 0x57, 0x3f, 0xfc, 0x64, 0xe1, 0xc4, 0x47, 0x9f, 0x2c, 0x9c, - 0xf8, 0xf8, 0x93, 0x85, 0x13, 0x5f, 0x6f, 0x2f, 0x18, 0x1f, 0xb6, 0x17, 0x8c, 0x8f, 0xda, 0x0b, - 0xc6, 0xc7, 0xed, 0x05, 0xe3, 0x6f, 0xed, 0x05, 0xe3, 0x87, 0x7f, 0x5f, 0x38, 0xf1, 0xe5, 0xe7, - 0x8f, 0xf5, 0x6d, 0xfb, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x16, 0x56, 0x59, 0x35, 0x34, 0x2f, - 0x00, 0x00, + proto.RegisterFile("k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto", fileDescriptor_3623d6c0bd238430) +} + +var fileDescriptor_3623d6c0bd238430 = []byte{ + // 3214 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x5b, 0xcf, 0x73, 0x1c, 0x57, + 0xf1, 0xf7, 0xac, 0xb4, 0xd2, 0xaa, 0x25, 0x5b, 0xd2, 0xb3, 0xa5, 0x8c, 0x15, 0x47, 0x2b, 0xaf, + 0xbf, 0xf1, 0x57, 0x49, 0x9c, 0x55, 0xe2, 0x6f, 0xf2, 0x4d, 0x08, 0xa4, 0x52, 0x5a, 0x49, 0x0e, + 0x4a, 0x2c, 0x4b, 0xbc, 0xb5, 0x1d, 0x41, 0x7e, 0x8e, 0x76, 0x9e, 0xd6, 0x63, 0xcf, 0xce, 0x8c, + 0xe7, 0xcd, 0xac, 0xa4, 0x0a, 0x50, 0x40, 0x2a, 0x05, 0x45, 0x01, 0xa1, 0x48, 0x2e, 0x14, 0x70, + 0x08, 0x14, 0x17, 0x0e, 0x70, 0x80, 0x1b, 0xfc, 0x01, 0x39, 0xa6, 0x80, 0x43, 0x0e, 0xd4, 0x16, + 0x59, 0xfe, 0x05, 0x0a, 0xaa, 0x74, 0xa2, 0xde, 
0x8f, 0x99, 0x79, 0x33, 0xbb, 0x6b, 0xbb, 0xa2, + 0xdd, 0xb8, 0xb8, 0x69, 0xbb, 0xfb, 0xf5, 0xa7, 0xa7, 0x5f, 0xbf, 0xee, 0x7e, 0x3d, 0x23, 0xd8, + 0xb8, 0xf5, 0x2c, 0x2d, 0x5b, 0xee, 0x92, 0xe1, 0x59, 0x64, 0x3f, 0x20, 0x0e, 0xb5, 0x5c, 0x87, + 0x3e, 0x6e, 0x78, 0x16, 0x25, 0x7e, 0x93, 0xf8, 0x4b, 0xde, 0xad, 0x3a, 0xe3, 0xd1, 0xb4, 0xc0, + 0x52, 0xf3, 0xc9, 0x1d, 0x12, 0x18, 0x4f, 0x2e, 0xd5, 0x89, 0x43, 0x7c, 0x23, 0x20, 0x66, 0xd9, + 0xf3, 0xdd, 0xc0, 0x45, 0xcf, 0x0b, 0x75, 0xe5, 0x94, 0xf4, 0x9b, 0xb1, 0xba, 0xb2, 0x77, 0xab, + 0xce, 0x78, 0x34, 0x2d, 0x50, 0x96, 0xea, 0xe6, 0x1e, 0xaf, 0x5b, 0xc1, 0x8d, 0x70, 0xa7, 0x5c, + 0x73, 0x1b, 0x4b, 0x75, 0xb7, 0xee, 0x2e, 0x71, 0xad, 0x3b, 0xe1, 0x2e, 0xff, 0xc5, 0x7f, 0xf0, + 0xbf, 0x04, 0xda, 0xdc, 0x53, 0x89, 0xf1, 0x0d, 0xa3, 0x76, 0xc3, 0x72, 0x88, 0x7f, 0x90, 0x58, + 0xdc, 0x20, 0x81, 0xb1, 0xd4, 0xec, 0xb0, 0x71, 0x6e, 0xa9, 0xd7, 0x2a, 0x3f, 0x74, 0x02, 0xab, + 0x41, 0x3a, 0x16, 0xfc, 0xff, 0xdd, 0x16, 0xd0, 0xda, 0x0d, 0xd2, 0x30, 0xb2, 0xeb, 0x4a, 0x87, + 0x1a, 0x4c, 0xaf, 0xb8, 0x4e, 0x93, 0xf8, 0xec, 0x29, 0x31, 0xb9, 0x1d, 0x12, 0x1a, 0xa0, 0x0a, + 0x0c, 0x85, 0x96, 0xa9, 0x6b, 0x0b, 0xda, 0xe2, 0x58, 0xe5, 0x89, 0x8f, 0x5a, 0xc5, 0x63, 0xed, + 0x56, 0x71, 0xe8, 0xda, 0xfa, 0xea, 0x61, 0xab, 0x78, 0xb6, 0x17, 0x52, 0x70, 0xe0, 0x11, 0x5a, + 0xbe, 0xb6, 0xbe, 0x8a, 0xd9, 0x62, 0xf4, 0x22, 0x4c, 0x9b, 0x84, 0x5a, 0x3e, 0x31, 0x97, 0xb7, + 0xd6, 0xaf, 0x0b, 0xfd, 0x7a, 0x8e, 0x6b, 0x3c, 0x2d, 0x35, 0x4e, 0xaf, 0x66, 0x05, 0x70, 0xe7, + 0x1a, 0xb4, 0x0d, 0xa3, 0xee, 0xce, 0x4d, 0x52, 0x0b, 0xa8, 0x3e, 0xb4, 0x30, 0xb4, 0x38, 0x7e, + 0xf1, 0xf1, 0x72, 0xb2, 0x83, 0xb1, 0x09, 0x7c, 0xdb, 0xe4, 0xc3, 0x96, 0xb1, 0xb1, 0xb7, 0x16, + 0xed, 0x5c, 0x65, 0x52, 0xa2, 0x8d, 0x6e, 0x0a, 0x2d, 0x38, 0x52, 0x57, 0xfa, 0x55, 0x0e, 0x90, + 0xfa, 0xf0, 0xd4, 0x73, 0x1d, 0x4a, 0xfa, 0xf2, 0xf4, 0x14, 0xa6, 0x6a, 0x5c, 0x73, 0x40, 0x4c, + 0x89, 0xab, 0xe7, 0x3e, 0x8b, 0xf5, 0xba, 0xc4, 0x9f, 0x5a, 0xc9, 0xa8, 0xc3, 0x1d, 0x00, 0xe8, + 0x2a, 0x8c, 0xf8, 0x84, 0x86, 0x76, 0xa0, 0x0f, 0x2d, 0x68, 0x8b, 0xe3, 0x17, 0x2f, 0xf4, 0x84, + 0xe2, 0xf1, 0xcd, 0x82, 0xaf, 0xdc, 0x7c, 0xb2, 0x5c, 0x0d, 0x8c, 0x20, 0xa4, 0x95, 0x13, 0x12, + 0x69, 0x04, 0x73, 0x1d, 0x58, 0xea, 0x2a, 0x7d, 0x2f, 0x07, 0x53, 0xaa, 0x97, 0x9a, 0x16, 0xd9, + 0x43, 0x7b, 0x30, 0xea, 0x8b, 0x60, 0xe1, 0x7e, 0x1a, 0xbf, 0xb8, 0x55, 0x3e, 0xd2, 0xb1, 0x2a, + 0x77, 0x04, 0x61, 0x65, 0x9c, 0xed, 0x99, 0xfc, 0x81, 0x23, 0x34, 0xf4, 0x36, 0x14, 0x7c, 0xb9, + 0x51, 0x3c, 0x9a, 0xc6, 0x2f, 0x7e, 0xa5, 0x8f, 0xc8, 0x42, 0x71, 0x65, 0xa2, 0xdd, 0x2a, 0x16, + 0xa2, 0x5f, 0x38, 0x06, 0x2c, 0xbd, 0x9f, 0x83, 0xf9, 0x95, 0x90, 0x06, 0x6e, 0x03, 0x13, 0xea, + 0x86, 0x7e, 0x8d, 0xac, 0xb8, 0x76, 0xd8, 0x70, 0x56, 0xc9, 0xae, 0xe5, 0x58, 0x01, 0x8b, 0xd6, + 0x05, 0x18, 0x76, 0x8c, 0x06, 0x91, 0xd1, 0x33, 0x21, 0x7d, 0x3a, 0x7c, 0xc5, 0x68, 0x10, 0xcc, + 0x39, 0x4c, 0x82, 0x05, 0x8b, 0x3c, 0x0b, 0xb1, 0xc4, 0xd5, 0x03, 0x8f, 0x60, 0xce, 0x41, 0xe7, + 0x61, 0x64, 0xd7, 0xf5, 0x1b, 0x86, 0xd8, 0xc7, 0xb1, 0x64, 0x67, 0x2e, 0x71, 0x2a, 0x96, 0x5c, + 0xf4, 0x34, 0x8c, 0x9b, 0x84, 0xd6, 0x7c, 0xcb, 0x63, 0xd0, 0xfa, 0x30, 0x17, 0x3e, 0x29, 0x85, + 0xc7, 0x57, 0x13, 0x16, 0x56, 0xe5, 0xd0, 0x05, 0x28, 0x78, 0xbe, 0xe5, 0xfa, 0x56, 0x70, 0xa0, + 0xe7, 0x17, 0xb4, 0xc5, 0x7c, 0x65, 0x4a, 0xae, 0x29, 0x6c, 0x49, 0x3a, 0x8e, 0x25, 0xd0, 0x02, + 0x14, 0x5e, 0xaa, 0x6e, 0x5e, 0xd9, 0x32, 0x82, 0x1b, 0xfa, 0x08, 0x47, 0x18, 0x66, 0xd2, 0x38, + 0xa6, 0x96, 0xfe, 0x96, 0x03, 0x3d, 0xeb, 0x95, 0xc8, 0xa5, 0xe8, 0x12, 
0x14, 0x68, 0xc0, 0x32, + 0x4e, 0xfd, 0x40, 0xfa, 0xe4, 0xd1, 0x08, 0xac, 0x2a, 0xe9, 0x87, 0xad, 0xe2, 0x6c, 0xb2, 0x22, + 0xa2, 0x72, 0x7f, 0xc4, 0x6b, 0xd1, 0x2f, 0x34, 0x38, 0xb9, 0x47, 0x76, 0x6e, 0xb8, 0xee, 0xad, + 0x15, 0xdb, 0x22, 0x4e, 0xb0, 0xe2, 0x3a, 0xbb, 0x56, 0x5d, 0xc6, 0x00, 0x3e, 0x62, 0x0c, 0xbc, + 0xd2, 0xa9, 0xb9, 0xf2, 0x40, 0xbb, 0x55, 0x3c, 0xd9, 0x85, 0x81, 0xbb, 0xd9, 0x81, 0xb6, 0x41, + 0xaf, 0x65, 0x0e, 0x89, 0x4c, 0x60, 0x22, 0x6d, 0x8d, 0x55, 0xce, 0xb4, 0x5b, 0x45, 0x7d, 0xa5, + 0x87, 0x0c, 0xee, 0xb9, 0xba, 0xf4, 0xce, 0x50, 0xd6, 0xbd, 0x4a, 0xb8, 0xbd, 0x05, 0x05, 0x76, + 0x8c, 0x4d, 0x23, 0x30, 0xe4, 0x41, 0x7c, 0xe2, 0xde, 0x0e, 0xbd, 0xc8, 0x19, 0x1b, 0x24, 0x30, + 0x2a, 0x48, 0x6e, 0x08, 0x24, 0x34, 0x1c, 0x6b, 0x45, 0xdf, 0x80, 0x61, 0xea, 0x91, 0x9a, 0x74, + 0xf4, 0xab, 0x47, 0x3d, 0x6c, 0x3d, 0x1e, 0xa4, 0xea, 0x91, 0x5a, 0x72, 0x16, 0xd8, 0x2f, 0xcc, + 0x61, 0xd1, 0xbb, 0x1a, 0x8c, 0x50, 0x9e, 0xa0, 0x64, 0x52, 0x7b, 0x7d, 0x50, 0x16, 0x64, 0xb2, + 0xa0, 0xf8, 0x8d, 0x25, 0x78, 0xe9, 0x9f, 0x39, 0x38, 0xdb, 0x6b, 0xe9, 0x8a, 0xeb, 0x98, 0x62, + 0x3b, 0xd6, 0xe5, 0xd9, 0x16, 0x91, 0xfe, 0xb4, 0x7a, 0xb6, 0x0f, 0x5b, 0xc5, 0x87, 0xef, 0xaa, + 0x40, 0x49, 0x02, 0x5f, 0x88, 0x9f, 0x5b, 0x24, 0x8a, 0xb3, 0x69, 0xc3, 0x0e, 0x5b, 0xc5, 0xc9, + 0x78, 0x59, 0xda, 0x56, 0xd4, 0x04, 0x64, 0x1b, 0x34, 0xb8, 0xea, 0x1b, 0x0e, 0x15, 0x6a, 0xad, + 0x06, 0x91, 0xee, 0x7b, 0xf4, 0xde, 0xc2, 0x83, 0xad, 0xa8, 0xcc, 0x49, 0x48, 0x74, 0xb9, 0x43, + 0x1b, 0xee, 0x82, 0xc0, 0xf2, 0x96, 0x4f, 0x0c, 0x1a, 0xa7, 0x22, 0xa5, 0xa2, 0x30, 0x2a, 0x96, + 0x5c, 0xf4, 0x08, 0x8c, 0x36, 0x08, 0xa5, 0x46, 0x9d, 0xf0, 0xfc, 0x33, 0x96, 0x94, 0xe8, 0x0d, + 0x41, 0xc6, 0x11, 0x9f, 0xf5, 0x27, 0x67, 0x7a, 0x79, 0xed, 0xb2, 0x45, 0x03, 0xf4, 0x5a, 0xc7, + 0x01, 0x28, 0xdf, 0xdb, 0x13, 0xb2, 0xd5, 0x3c, 0xfc, 0xe3, 0xe4, 0x17, 0x51, 0x94, 0xe0, 0xff, + 0x3a, 0xe4, 0xad, 0x80, 0x34, 0xa2, 0xda, 0xfd, 0xca, 0x80, 0x62, 0xaf, 0x72, 0x5c, 0xda, 0x90, + 0x5f, 0x67, 0x68, 0x58, 0x80, 0x96, 0x7e, 0x9d, 0x83, 0x87, 0x7a, 0x2d, 0x61, 0x05, 0x85, 0x32, + 0x8f, 0x7b, 0x76, 0xe8, 0x1b, 0xb6, 0x8c, 0xb8, 0xd8, 0xe3, 0x5b, 0x9c, 0x8a, 0x25, 0x97, 0xa5, + 0x7c, 0x6a, 0x39, 0xf5, 0xd0, 0x36, 0x7c, 0x19, 0x4e, 0xf1, 0x53, 0x57, 0x25, 0x1d, 0xc7, 0x12, + 0xa8, 0x0c, 0x40, 0x6f, 0xb8, 0x7e, 0xc0, 0x31, 0x64, 0xf6, 0x3a, 0xc1, 0x12, 0x44, 0x35, 0xa6, + 0x62, 0x45, 0x82, 0x55, 0xb4, 0x5b, 0x96, 0x63, 0xca, 0x5d, 0x8f, 0x4f, 0xf1, 0xcb, 0x96, 0x63, + 0x62, 0xce, 0x61, 0xf8, 0xb6, 0x45, 0x03, 0x46, 0x91, 0x5b, 0x9e, 0xf2, 0x3a, 0x97, 0x8c, 0x25, + 0x18, 0x7e, 0x8d, 0x65, 0x7d, 0xd7, 0xb7, 0x08, 0xd5, 0x47, 0x12, 0xfc, 0x95, 0x98, 0x8a, 0x15, + 0x89, 0xd2, 0x3b, 0xd0, 0x3b, 0x48, 0x58, 0x2a, 0x41, 0xe7, 0x20, 0x5f, 0xf7, 0xdd, 0xd0, 0x93, + 0x5e, 0x8a, 0xbd, 0xfd, 0x22, 0x23, 0x62, 0xc1, 0x63, 0x51, 0xd9, 0x4c, 0xb5, 0xa9, 0x71, 0x54, + 0x46, 0xcd, 0x69, 0xc4, 0x47, 0xdf, 0xd6, 0x20, 0xef, 0x48, 0xe7, 0xb0, 0x90, 0x7b, 0x6d, 0x40, + 0x71, 0xc1, 0xdd, 0x9b, 0x98, 0x2b, 0x3c, 0x2f, 0x90, 0xd1, 0x53, 0x90, 0xa7, 0x35, 0xd7, 0x23, + 0xd2, 0xeb, 0xf3, 0x91, 0x50, 0x95, 0x11, 0x0f, 0x5b, 0xc5, 0xe3, 0x91, 0x3a, 0x4e, 0xc0, 0x42, + 0x18, 0x7d, 0x57, 0x03, 0x68, 0x1a, 0xb6, 0x65, 0x1a, 0xbc, 0x65, 0xc8, 0x73, 0xf3, 0xfb, 0x1b, + 0xd6, 0xd7, 0x63, 0xf5, 0x62, 0xd3, 0x92, 0xdf, 0x58, 0x81, 0x46, 0xef, 0x69, 0x30, 0x41, 0xc3, + 0x1d, 0x5f, 0xae, 0xa2, 0xbc, 0xb9, 0x18, 0xbf, 0xf8, 0xd5, 0xbe, 0xda, 0x52, 0x55, 0x00, 0x2a, + 0x53, 0xed, 0x56, 0x71, 0x42, 0xa5, 0xe0, 0x94, 0x01, 0xe8, 0x07, 0x1a, 0x14, 0x9a, 0x51, 0xcd, + 
0x1e, 0xe5, 0x07, 0xfe, 0x8d, 0x01, 0x6d, 0xac, 0x8c, 0xa8, 0xe4, 0x14, 0xc4, 0x7d, 0x40, 0x6c, + 0x01, 0xfa, 0xa3, 0x06, 0xba, 0x61, 0x8a, 0x04, 0x6f, 0xd8, 0x5b, 0xbe, 0xe5, 0x04, 0xc4, 0x17, + 0xfd, 0x26, 0xd5, 0x0b, 0xdc, 0xbc, 0xfe, 0xd6, 0xc2, 0x6c, 0x2f, 0x5b, 0x59, 0x90, 0xd6, 0xe9, + 0xcb, 0x3d, 0xcc, 0xc0, 0x3d, 0x0d, 0x44, 0x1f, 0x68, 0x30, 0x45, 0x89, 0x4d, 0x6a, 0x81, 0xb1, + 0x63, 0x93, 0x4b, 0x16, 0xb1, 0x4d, 0xaa, 0x8f, 0x73, 0xab, 0xaf, 0x1c, 0xd1, 0xea, 0x6a, 0x5a, + 0x6d, 0x72, 0x45, 0xca, 0x30, 0x28, 0xee, 0xb0, 0x80, 0xc7, 0x7f, 0xd2, 0x69, 0xe9, 0x63, 0x03, + 0x88, 0xff, 0xa4, 0xc5, 0x93, 0x49, 0x2b, 0x69, 0xec, 0x14, 0x68, 0xb4, 0x09, 0x33, 0x9e, 0x4f, + 0x38, 0xc0, 0x35, 0xe7, 0x96, 0xe3, 0xee, 0x39, 0xd2, 0x49, 0xb0, 0xa0, 0x2d, 0x16, 0x2a, 0xa7, + 0xdb, 0xad, 0xe2, 0xcc, 0x56, 0x37, 0x01, 0xdc, 0x7d, 0x5d, 0xe9, 0xbd, 0xa1, 0xec, 0xe5, 0x24, + 0xdb, 0xdc, 0xb0, 0x4d, 0x61, 0x26, 0x88, 0x2d, 0xa3, 0xba, 0xc6, 0xb7, 0xe3, 0xad, 0x01, 0xc5, + 0x78, 0xdc, 0x9d, 0x24, 0x0d, 0x66, 0x4c, 0xa2, 0x58, 0xb1, 0x03, 0xfd, 0x54, 0x83, 0xe3, 0x46, + 0xad, 0x46, 0xbc, 0x80, 0x98, 0xa2, 0xe6, 0xe4, 0x3e, 0x87, 0xb4, 0x3a, 0x23, 0xad, 0x3a, 0xbe, + 0xac, 0x42, 0xe3, 0xb4, 0x25, 0xe8, 0x39, 0x38, 0x41, 0x03, 0xd7, 0x27, 0x66, 0xa6, 0x9b, 0x47, + 0xed, 0x56, 0xf1, 0x44, 0x35, 0xc5, 0xc1, 0x19, 0xc9, 0xd2, 0xbf, 0x46, 0xa0, 0x78, 0x97, 0x0c, + 0x70, 0x0f, 0xf7, 0xc5, 0xf3, 0x30, 0xc2, 0x1f, 0xd7, 0xe4, 0x5e, 0x29, 0x28, 0x1d, 0x2a, 0xa7, + 0x62, 0xc9, 0x65, 0xf5, 0x8b, 0xe1, 0xb3, 0xae, 0x6a, 0x88, 0x0b, 0xc6, 0xf5, 0xab, 0x2a, 0xc8, + 0x38, 0xe2, 0xa3, 0x8b, 0x00, 0x26, 0xf1, 0x7c, 0xc2, 0x6a, 0xa8, 0xa9, 0x8f, 0x72, 0xe9, 0x78, + 0x93, 0x56, 0x63, 0x0e, 0x56, 0xa4, 0xd0, 0x25, 0x40, 0xd1, 0x2f, 0xcb, 0x75, 0x5e, 0x31, 0x7c, + 0xc7, 0x72, 0xea, 0x7a, 0x81, 0x9b, 0x3d, 0xcb, 0x9a, 0xc4, 0xd5, 0x0e, 0x2e, 0xee, 0xb2, 0x02, + 0xbd, 0x0d, 0x23, 0x62, 0x16, 0xc5, 0x0b, 0xd7, 0x00, 0x8b, 0x0f, 0x70, 0x1f, 0x71, 0x28, 0x2c, + 0x21, 0x3b, 0x8b, 0x4e, 0xfe, 0x7e, 0x17, 0x9d, 0x3b, 0x66, 0xf9, 0x91, 0xff, 0xca, 0x2c, 0x3f, + 0x76, 0xbf, 0xb3, 0x7c, 0xe9, 0xdf, 0x5a, 0x36, 0x15, 0x2a, 0x3b, 0x50, 0xad, 0x19, 0x36, 0x41, + 0xab, 0x30, 0xc5, 0xee, 0x97, 0x98, 0x78, 0xb6, 0x55, 0x33, 0x28, 0x1f, 0x6f, 0x88, 0x33, 0x98, + 0x00, 0x65, 0xf8, 0xb8, 0x63, 0x05, 0x7a, 0x09, 0x90, 0xb8, 0x73, 0xa5, 0xf4, 0x88, 0xf6, 0x31, + 0xbe, 0x3d, 0x55, 0x3b, 0x24, 0x70, 0x97, 0x55, 0x68, 0x05, 0xa6, 0x6d, 0x63, 0x87, 0xd8, 0xe2, + 0xf9, 0x5c, 0x9f, 0xab, 0x12, 0x03, 0xa0, 0x99, 0x76, 0xab, 0x38, 0x7d, 0x39, 0xcb, 0xc4, 0x9d, + 0xf2, 0xa5, 0xb3, 0xd9, 0x8c, 0xa3, 0x3e, 0xb8, 0xb8, 0xc9, 0x7e, 0x98, 0x83, 0xb9, 0xde, 0x01, + 0x8b, 0xbe, 0x93, 0x5c, 0xb8, 0xc5, 0x7d, 0xea, 0x8d, 0x41, 0x1d, 0x0e, 0x79, 0xe3, 0x86, 0xce, + 0xdb, 0x36, 0xfa, 0x26, 0x6b, 0x6e, 0x0d, 0x3b, 0x1a, 0xf1, 0xbd, 0x3e, 0x30, 0x13, 0x18, 0x48, + 0x65, 0x4c, 0xf4, 0xcd, 0x86, 0xcd, 0xdb, 0x64, 0xc3, 0x26, 0xa5, 0xdf, 0x68, 0xd9, 0x99, 0x4b, + 0x92, 0x58, 0xd0, 0x0f, 0x35, 0x98, 0x74, 0x3d, 0xe2, 0x2c, 0x6f, 0xad, 0x5f, 0xff, 0x3f, 0x91, + 0x60, 0xa4, 0xab, 0x8e, 0x1a, 0xf3, 0x2f, 0x55, 0x37, 0xaf, 0x08, 0x85, 0x5b, 0xbe, 0xeb, 0xd1, + 0xca, 0xc9, 0x76, 0xab, 0x38, 0xb9, 0x99, 0x86, 0xc2, 0x59, 0xec, 0x52, 0x03, 0x66, 0xd6, 0xf6, + 0x03, 0xe2, 0x3b, 0x86, 0xbd, 0xea, 0xd6, 0xc2, 0x06, 0x71, 0x02, 0x61, 0x68, 0x66, 0x3e, 0xa8, + 0xdd, 0xe3, 0x7c, 0xf0, 0x21, 0x18, 0x0a, 0x7d, 0x5b, 0x46, 0xf1, 0x78, 0x3c, 0xff, 0xc6, 0x97, + 0x31, 0xa3, 0x97, 0xce, 0xc2, 0x30, 0xb3, 0x13, 0x9d, 0x86, 0x21, 0xdf, 0xd8, 0xe3, 0x5a, 0x27, + 0x2a, 0xa3, 0x4c, 0x04, 
0x1b, 0x7b, 0x98, 0xd1, 0x4a, 0x7f, 0x3d, 0x0b, 0x93, 0x99, 0x67, 0x41, + 0x73, 0x90, 0x8b, 0x87, 0xea, 0x20, 0x95, 0xe6, 0xd6, 0x57, 0x71, 0xce, 0x32, 0xd1, 0x33, 0x71, + 0x4d, 0x10, 0xa0, 0xc5, 0xb8, 0xc4, 0x71, 0x2a, 0xbb, 0xcd, 0x24, 0xea, 0x98, 0x21, 0x51, 0x3e, + 0x67, 0x36, 0x90, 0x5d, 0x79, 0x4a, 0x84, 0x0d, 0x64, 0x17, 0x33, 0xda, 0x67, 0x1d, 0x8e, 0x46, + 0xd3, 0xd9, 0xfc, 0x3d, 0x4c, 0x67, 0x47, 0xee, 0x38, 0x9d, 0x3d, 0x07, 0xf9, 0xc0, 0x0a, 0x6c, + 0xc2, 0xeb, 0xab, 0x72, 0xe9, 0xbc, 0xca, 0x88, 0x58, 0xf0, 0xd0, 0x4d, 0x18, 0x35, 0xc9, 0xae, + 0x11, 0xda, 0x01, 0x2f, 0xa5, 0xe3, 0x17, 0x57, 0xfa, 0x10, 0x42, 0x62, 0x74, 0xbe, 0x2a, 0xf4, + 0xe2, 0x08, 0x00, 0x3d, 0x0c, 0xa3, 0x0d, 0x63, 0xdf, 0x6a, 0x84, 0x0d, 0xde, 0xf7, 0x6a, 0x42, + 0x6c, 0x43, 0x90, 0x70, 0xc4, 0x63, 0x99, 0x91, 0xec, 0xd7, 0xec, 0x90, 0x5a, 0x4d, 0x22, 0x99, + 0xb2, 0x27, 0x8d, 0x33, 0xe3, 0x5a, 0x86, 0x8f, 0x3b, 0x56, 0x70, 0x30, 0xcb, 0xe1, 0x8b, 0xc7, + 0x15, 0x30, 0x41, 0xc2, 0x11, 0x2f, 0x0d, 0x26, 0xe5, 0x27, 0x7a, 0x81, 0xc9, 0xc5, 0x1d, 0x2b, + 0xd0, 0x63, 0x30, 0xd6, 0x30, 0xf6, 0x2f, 0x13, 0xa7, 0x1e, 0xdc, 0xd0, 0x8f, 0x2f, 0x68, 0x8b, + 0x43, 0x95, 0xe3, 0xed, 0x56, 0x71, 0x6c, 0x23, 0x22, 0xe2, 0x84, 0xcf, 0x85, 0x2d, 0x47, 0x0a, + 0x9f, 0x50, 0x84, 0x23, 0x22, 0x4e, 0xf8, 0xac, 0xa9, 0xf2, 0x8c, 0x80, 0x1d, 0x2e, 0x7d, 0x32, + 0x3d, 0x14, 0xd8, 0x12, 0x64, 0x1c, 0xf1, 0xd1, 0x22, 0x14, 0x1a, 0xc6, 0x3e, 0x1f, 0xe0, 0xe8, + 0x53, 0x5c, 0x2d, 0x7f, 0x8d, 0xb0, 0x21, 0x69, 0x38, 0xe6, 0x72, 0x49, 0xcb, 0x11, 0x92, 0xd3, + 0x8a, 0xa4, 0xa4, 0xe1, 0x98, 0xcb, 0x82, 0x38, 0x74, 0xac, 0xdb, 0x21, 0x11, 0xc2, 0x88, 0x7b, + 0x26, 0x0e, 0xe2, 0x6b, 0x09, 0x0b, 0xab, 0x72, 0xa8, 0x0c, 0xd0, 0x08, 0xed, 0xc0, 0xf2, 0x6c, + 0xb2, 0xb9, 0xab, 0x9f, 0xe4, 0xfe, 0xe7, 0x77, 0x91, 0x8d, 0x98, 0x8a, 0x15, 0x09, 0x44, 0x60, + 0x98, 0x38, 0x61, 0x43, 0x3f, 0xc5, 0x2b, 0x77, 0x5f, 0x42, 0x30, 0x3e, 0x39, 0x6b, 0x4e, 0xd8, + 0xc0, 0x5c, 0x3d, 0x7a, 0x06, 0x8e, 0x37, 0x8c, 0x7d, 0x96, 0x0e, 0x88, 0x1f, 0x58, 0x84, 0xea, + 0x33, 0xfc, 0xe1, 0xa7, 0x59, 0x13, 0xbe, 0xa1, 0x32, 0x70, 0x5a, 0x8e, 0x2f, 0xb4, 0x1c, 0x65, + 0xe1, 0xac, 0xb2, 0x50, 0x65, 0xe0, 0xb4, 0x1c, 0xf3, 0xb4, 0x4f, 0x6e, 0x87, 0x96, 0x4f, 0x4c, + 0xfd, 0x01, 0xde, 0xb7, 0xcb, 0x57, 0x3b, 0x82, 0x86, 0x63, 0x2e, 0x6a, 0x46, 0x93, 0x3e, 0x9d, + 0x1f, 0xc3, 0x6b, 0xfd, 0xcd, 0xe4, 0x9b, 0xfe, 0xb2, 0xef, 0x1b, 0x07, 0xa2, 0xd2, 0xa8, 0x33, + 0x3e, 0x44, 0x21, 0x6f, 0xd8, 0xf6, 0xe6, 0xae, 0x7e, 0xba, 0x2f, 0x5d, 0x53, 0xb6, 0x82, 0xc4, + 0x59, 0x67, 0x99, 0x81, 0x60, 0x81, 0xc5, 0x40, 0x5d, 0x87, 0x85, 0xc6, 0xdc, 0x60, 0x41, 0x37, + 0x19, 0x08, 0x16, 0x58, 0xfc, 0x49, 0x9d, 0x83, 0xcd, 0x5d, 0xfd, 0xc1, 0x01, 0x3f, 0x29, 0x03, + 0xc1, 0x02, 0x0b, 0x59, 0x30, 0xe4, 0xb8, 0x81, 0x7e, 0x66, 0x20, 0xe5, 0x99, 0x17, 0x9c, 0x2b, + 0x6e, 0x80, 0x19, 0x06, 0xfa, 0x89, 0x06, 0xe0, 0x25, 0x21, 0xfa, 0x50, 0x5f, 0x06, 0x48, 0x19, + 0xc8, 0x72, 0x12, 0xdb, 0x6b, 0x4e, 0xe0, 0x1f, 0x24, 0xb7, 0x36, 0xe5, 0x0c, 0x28, 0x56, 0xa0, + 0x5f, 0x6a, 0x70, 0x4a, 0xed, 0xde, 0x63, 0xf3, 0xe6, 0xb9, 0x47, 0xae, 0xf6, 0x3b, 0xcc, 0x2b, + 0xae, 0x6b, 0x57, 0xf4, 0x76, 0xab, 0x78, 0x6a, 0xb9, 0x0b, 0x2a, 0xee, 0x6a, 0x0b, 0xfa, 0xad, + 0x06, 0xd3, 0x32, 0x8b, 0x2a, 0x16, 0x16, 0xb9, 0x03, 0x49, 0xbf, 0x1d, 0x98, 0xc5, 0x11, 0x7e, + 0x8c, 0x3f, 0x49, 0xe8, 0xe0, 0xe3, 0x4e, 0xd3, 0xd0, 0x1f, 0x34, 0x98, 0x30, 0x89, 0x47, 0x1c, + 0x93, 0x38, 0x35, 0x66, 0xeb, 0x42, 0x5f, 0x26, 0x29, 0x59, 0x5b, 0x57, 0x15, 0x08, 0x61, 0x66, + 0x59, 0x9a, 0x39, 0xa1, 0xb2, 0x0e, 0x5b, 0xc5, 
0xd9, 0x64, 0xa9, 0xca, 0xc1, 0x29, 0x2b, 0xd1, + 0xfb, 0x1a, 0x4c, 0x26, 0x1b, 0x20, 0x4a, 0xca, 0xd9, 0x01, 0xc6, 0x01, 0x6f, 0x5f, 0x97, 0xd3, + 0x80, 0x38, 0x6b, 0x01, 0xfa, 0x9d, 0xc6, 0x3a, 0xb5, 0xe8, 0x3a, 0x4a, 0xf5, 0x12, 0xf7, 0xe5, + 0x9b, 0x7d, 0xf7, 0x65, 0x8c, 0x20, 0x5c, 0x79, 0x21, 0x69, 0x05, 0x63, 0xce, 0x61, 0xab, 0x38, + 0xa3, 0x7a, 0x32, 0x66, 0x60, 0xd5, 0x42, 0xf4, 0x7d, 0x0d, 0x26, 0x48, 0xd2, 0x71, 0x53, 0xfd, + 0x5c, 0x5f, 0x9c, 0xd8, 0xb5, 0x89, 0x17, 0x03, 0x04, 0x85, 0x45, 0x71, 0x0a, 0x9b, 0x75, 0x90, + 0x64, 0xdf, 0x68, 0x78, 0x36, 0xd1, 0xff, 0xa7, 0xcf, 0x1d, 0xe4, 0x9a, 0xd0, 0x8b, 0x23, 0x00, + 0x74, 0x01, 0x0a, 0x4e, 0x68, 0xdb, 0xec, 0xa6, 0xad, 0x3f, 0xcc, 0x7b, 0x91, 0x78, 0x80, 0x7d, + 0x45, 0xd2, 0x71, 0x2c, 0x81, 0x76, 0x61, 0x61, 0xff, 0xe5, 0x70, 0x87, 0xf8, 0x0e, 0x09, 0x08, + 0xed, 0x3a, 0xcb, 0xd4, 0xcf, 0x73, 0x2d, 0x73, 0xed, 0x56, 0x71, 0x76, 0xbb, 0xfb, 0xb4, 0xf3, + 0xae, 0x3a, 0xd0, 0xab, 0xf0, 0xa0, 0x22, 0xb3, 0xd6, 0xd8, 0x21, 0xa6, 0x49, 0xcc, 0xe8, 0xe2, + 0xa6, 0xff, 0xaf, 0x98, 0xa7, 0x46, 0x07, 0x7c, 0x3b, 0x2b, 0x80, 0xef, 0xb4, 0x1a, 0x5d, 0x86, + 0x59, 0x85, 0xbd, 0xee, 0x04, 0x9b, 0x7e, 0x35, 0xf0, 0x2d, 0xa7, 0xae, 0x2f, 0x72, 0xbd, 0xa7, + 0xa2, 0x13, 0xb9, 0xad, 0xf0, 0x70, 0x8f, 0x35, 0xe8, 0xcb, 0x29, 0x6d, 0xfc, 0x85, 0xa3, 0xe1, + 0xbd, 0x4c, 0x0e, 0xa8, 0xfe, 0x08, 0xef, 0x4e, 0xf8, 0x66, 0x6f, 0x2b, 0x74, 0xdc, 0x43, 0x1e, + 0xbd, 0x00, 0x27, 0x33, 0x1c, 0x76, 0x45, 0xd1, 0x1f, 0x15, 0x77, 0x0d, 0xd6, 0xcf, 0x6e, 0x47, + 0x44, 0xdc, 0x4d, 0x12, 0x7d, 0x09, 0x90, 0x42, 0xde, 0x30, 0x3c, 0xbe, 0xfe, 0x31, 0x71, 0xed, + 0x61, 0x3b, 0xba, 0x2d, 0x69, 0xb8, 0x8b, 0x1c, 0xfa, 0x99, 0x96, 0x7a, 0x92, 0xe4, 0x76, 0x4c, + 0xf5, 0x0b, 0xfc, 0xfc, 0x6e, 0x1c, 0x31, 0x0a, 0x95, 0xb7, 0x46, 0xa1, 0x4d, 0x14, 0x37, 0x2b, + 0x50, 0xb8, 0x87, 0x09, 0x73, 0xec, 0x86, 0x9e, 0xc9, 0xf0, 0x68, 0x0a, 0x86, 0x6e, 0x11, 0xf9, + 0x0d, 0x0a, 0x66, 0x7f, 0x22, 0x13, 0xf2, 0x4d, 0xc3, 0x0e, 0xa3, 0x21, 0x43, 0x9f, 0xbb, 0x03, + 0x2c, 0x94, 0x3f, 0x97, 0x7b, 0x56, 0x9b, 0xfb, 0x40, 0x83, 0xd9, 0xee, 0x85, 0xe7, 0xbe, 0x9a, + 0xf5, 0x73, 0x0d, 0xa6, 0x3b, 0x6a, 0x4c, 0x17, 0x8b, 0x6e, 0xa7, 0x2d, 0x7a, 0xb5, 0xdf, 0xc5, + 0x42, 0x1c, 0x0e, 0xde, 0x21, 0xab, 0xe6, 0xfd, 0x48, 0x83, 0xa9, 0x6c, 0xda, 0xbe, 0x9f, 0xfe, + 0x2a, 0x7d, 0x90, 0x83, 0xd9, 0xee, 0x8d, 0x3d, 0xf2, 0xe3, 0x09, 0xc6, 0x60, 0x26, 0x41, 0xdd, + 0x86, 0xd9, 0xef, 0x6a, 0x30, 0x7e, 0x33, 0x96, 0x8b, 0xbe, 0x51, 0xe8, 0xfb, 0x0c, 0x2a, 0xaa, + 0x93, 0x09, 0x83, 0x62, 0x15, 0xb7, 0xf4, 0x7b, 0x0d, 0x66, 0xba, 0x36, 0x00, 0xe8, 0x3c, 0x8c, + 0x18, 0xb6, 0xed, 0xee, 0x89, 0x51, 0xa2, 0xf2, 0xea, 0x62, 0x99, 0x53, 0xb1, 0xe4, 0x2a, 0xde, + 0xcb, 0x7d, 0x5e, 0xde, 0x2b, 0xfd, 0x49, 0x83, 0x33, 0x77, 0x8a, 0xc4, 0xfb, 0xb2, 0xa5, 0x8b, + 0x50, 0x90, 0xcd, 0xfb, 0x01, 0xdf, 0x4e, 0x99, 0x8a, 0x65, 0xd2, 0xe0, 0x9f, 0xe5, 0x89, 0xbf, + 0x4a, 0x2f, 0xc0, 0x64, 0x66, 0x10, 0xce, 0xaa, 0xf3, 0x4d, 0xea, 0x3a, 0xca, 0x28, 0x3b, 0xae, + 0xce, 0xd1, 0xb7, 0x7a, 0x38, 0x96, 0x28, 0x7d, 0xa8, 0xc1, 0x54, 0x95, 0xf8, 0x4d, 0xab, 0x46, + 0x30, 0xd9, 0x25, 0x3e, 0x71, 0x6a, 0x04, 0x2d, 0xc1, 0x18, 0xff, 0xba, 0xc0, 0x33, 0x6a, 0xd1, + 0x2b, 0xa9, 0x69, 0xa9, 0x63, 0xec, 0x4a, 0xc4, 0xc0, 0x89, 0x4c, 0xfc, 0xfa, 0x2a, 0xd7, 0xf3, + 0xf5, 0xd5, 0x19, 0x18, 0xf6, 0x92, 0x49, 0x76, 0x81, 0x71, 0xb9, 0x25, 0x9c, 0xca, 0xb9, 0xae, + 0x1f, 0xf0, 0xf1, 0x5c, 0x5e, 0x72, 0x5d, 0x3f, 0xc0, 0x9c, 0x5a, 0xfa, 0x4b, 0x0e, 0x4e, 0xa4, + 0x0b, 0x01, 0x03, 0xf4, 0x43, 0xbb, 0xe3, 0x7d, 0x19, 0xe3, 0x61, 0xce, 
0x51, 0xbf, 0x2e, 0xca, + 0xdd, 0xf9, 0xeb, 0x22, 0xf4, 0x22, 0x4c, 0xcb, 0x3f, 0xd7, 0xf6, 0x3d, 0x9f, 0x50, 0xfe, 0x4e, + 0x78, 0x28, 0xfd, 0x8d, 0xf2, 0x46, 0x56, 0x00, 0x77, 0xae, 0x41, 0x5f, 0xcc, 0x7c, 0xf9, 0x74, + 0x2e, 0xf9, 0xea, 0x89, 0xf5, 0x94, 0x7c, 0x7f, 0xae, 0xb3, 0x3c, 0xb2, 0xe6, 0xfb, 0xae, 0x9f, + 0xf9, 0x1c, 0x6a, 0x09, 0xc6, 0x76, 0x99, 0x00, 0xdf, 0xb8, 0x7c, 0xda, 0xe9, 0x97, 0x22, 0x06, + 0x4e, 0x64, 0xd0, 0xf3, 0x30, 0xe9, 0x7a, 0xa2, 0x85, 0xde, 0xb4, 0xcd, 0x2a, 0xb1, 0x77, 0xf9, + 0x28, 0xb2, 0x10, 0xcd, 0x8b, 0x53, 0x2c, 0x9c, 0x95, 0x2d, 0xfd, 0x59, 0x83, 0x6e, 0xdf, 0x35, + 0xa2, 0xd3, 0x62, 0xee, 0xab, 0x0c, 0x53, 0xa3, 0x99, 0x2f, 0x6a, 0xc2, 0x28, 0x15, 0xb1, 0x22, + 0x0f, 0xc3, 0xe6, 0x91, 0xdf, 0xee, 0xa4, 0x23, 0x4f, 0x34, 0x9c, 0x11, 0x35, 0x02, 0x63, 0xe7, + 0xa1, 0x66, 0x54, 0x42, 0xc7, 0x94, 0xaf, 0x02, 0x26, 0xc4, 0x79, 0x58, 0x59, 0x16, 0x34, 0x1c, + 0x73, 0x2b, 0xb5, 0x8f, 0x3e, 0x9d, 0x3f, 0xf6, 0xf1, 0xa7, 0xf3, 0xc7, 0x3e, 0xf9, 0x74, 0xfe, + 0xd8, 0xb7, 0xda, 0xf3, 0xda, 0x47, 0xed, 0x79, 0xed, 0xe3, 0xf6, 0xbc, 0xf6, 0x49, 0x7b, 0x5e, + 0xfb, 0x7b, 0x7b, 0x5e, 0xfb, 0xf1, 0x3f, 0xe6, 0x8f, 0x7d, 0xed, 0xf9, 0x23, 0xfd, 0x2b, 0xc1, + 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xdf, 0xe3, 0x03, 0xa1, 0x8a, 0x30, 0x00, 0x00, } func (m *ConversionRequest) Marshal() (dAtA []byte, err error) { @@ -1467,6 +1498,20 @@ func (m *CustomResourceDefinitionSpec) MarshalToSizedBuffer(dAtA []byte) (int, e _ = i var l int _ = l + if len(m.SelectableFields) > 0 { + for iNdEx := len(m.SelectableFields) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.SelectableFields[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + } if m.PreserveUnknownFields != nil { i-- if *m.PreserveUnknownFields { @@ -1645,6 +1690,20 @@ func (m *CustomResourceDefinitionVersion) MarshalToSizedBuffer(dAtA []byte) (int _ = i var l int _ = l + if len(m.SelectableFields) > 0 { + for iNdEx := len(m.SelectableFields) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.SelectableFields[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } if m.DeprecationWarning != nil { i -= len(*m.DeprecationWarning) copy(dAtA[i:], *m.DeprecationWarning) @@ -2597,6 +2656,34 @@ func (m *JSONSchemaPropsOrStringArray) MarshalToSizedBuffer(dAtA []byte) (int, e return len(dAtA) - i, nil } +func (m *SelectableField) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelectableField) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelectableField) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.JSONPath) + copy(dAtA[i:], m.JSONPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.JSONPath))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *ServiceReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2979,6 +3066,12 @@ func (m *CustomResourceDefinitionSpec) Size() (n int) { if m.PreserveUnknownFields != nil { n += 2 } + if len(m.SelectableFields) > 0 { + for _, e := range m.SelectableFields { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + 
} return n } @@ -3034,6 +3127,12 @@ func (m *CustomResourceDefinitionVersion) Size() (n int) { l = len(*m.DeprecationWarning) n += 1 + l + sovGenerated(uint64(l)) } + if len(m.SelectableFields) > 0 { + for _, e := range m.SelectableFields { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -3348,6 +3447,17 @@ func (m *JSONSchemaPropsOrStringArray) Size() (n int) { return n } +func (m *SelectableField) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.JSONPath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *ServiceReference) Size() (n int) { if m == nil { return 0 @@ -3562,6 +3672,11 @@ func (this *CustomResourceDefinitionSpec) String() string { repeatedStringForAdditionalPrinterColumns += strings.Replace(strings.Replace(f.String(), "CustomResourceColumnDefinition", "CustomResourceColumnDefinition", 1), `&`, ``, 1) + "," } repeatedStringForAdditionalPrinterColumns += "}" + repeatedStringForSelectableFields := "[]SelectableField{" + for _, f := range this.SelectableFields { + repeatedStringForSelectableFields += strings.Replace(strings.Replace(f.String(), "SelectableField", "SelectableField", 1), `&`, ``, 1) + "," + } + repeatedStringForSelectableFields += "}" s := strings.Join([]string{`&CustomResourceDefinitionSpec{`, `Group:` + fmt.Sprintf("%v", this.Group) + `,`, `Version:` + fmt.Sprintf("%v", this.Version) + `,`, @@ -3573,6 +3688,7 @@ func (this *CustomResourceDefinitionSpec) String() string { `AdditionalPrinterColumns:` + repeatedStringForAdditionalPrinterColumns + `,`, `Conversion:` + strings.Replace(this.Conversion.String(), "CustomResourceConversion", "CustomResourceConversion", 1) + `,`, `PreserveUnknownFields:` + valueToStringGenerated(this.PreserveUnknownFields) + `,`, + `SelectableFields:` + repeatedStringForSelectableFields + `,`, `}`, }, "") return s @@ -3603,6 +3719,11 @@ func (this *CustomResourceDefinitionVersion) String() string { repeatedStringForAdditionalPrinterColumns += strings.Replace(strings.Replace(f.String(), "CustomResourceColumnDefinition", "CustomResourceColumnDefinition", 1), `&`, ``, 1) + "," } repeatedStringForAdditionalPrinterColumns += "}" + repeatedStringForSelectableFields := "[]SelectableField{" + for _, f := range this.SelectableFields { + repeatedStringForSelectableFields += strings.Replace(strings.Replace(f.String(), "SelectableField", "SelectableField", 1), `&`, ``, 1) + "," + } + repeatedStringForSelectableFields += "}" s := strings.Join([]string{`&CustomResourceDefinitionVersion{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Served:` + fmt.Sprintf("%v", this.Served) + `,`, @@ -3612,6 +3733,7 @@ func (this *CustomResourceDefinitionVersion) String() string { `AdditionalPrinterColumns:` + repeatedStringForAdditionalPrinterColumns + `,`, `Deprecated:` + fmt.Sprintf("%v", this.Deprecated) + `,`, `DeprecationWarning:` + valueToStringGenerated(this.DeprecationWarning) + `,`, + `SelectableFields:` + repeatedStringForSelectableFields + `,`, `}`, }, "") return s @@ -3835,6 +3957,16 @@ func (this *JSONSchemaPropsOrStringArray) String() string { }, "") return s } +func (this *SelectableField) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectableField{`, + `JSONPath:` + fmt.Sprintf("%v", this.JSONPath) + `,`, + `}`, + }, "") + return s +} func (this *ServiceReference) String() string { if this == nil { return "nil" @@ -5755,6 +5887,40 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { } b := bool(v != 0) 
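// The two new repeated selectableFields entries are ordinary length-delimited
// protobuf fields: in CustomResourceDefinitionSpec they are written with tag
// byte 0x5a ((11 << 3) | 2, i.e. field number 11, wire type 2) and in
// CustomResourceDefinitionVersion with tag byte 0x4a ((9 << 3) | 2, field 9).
// The Unmarshal cases below read a varint length prefix for each element and
// then decode that many bytes as a SelectableField message.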
m.PreserveUnknownFields = &b + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SelectableFields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SelectableFields = append(m.SelectableFields, SelectableField{}) + if err := m.SelectableFields[len(m.SelectableFields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6185,6 +6351,40 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.DeprecationWarning = &s iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SelectableFields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SelectableFields = append(m.SelectableFields, SelectableField{}) + if err := m.SelectableFields[len(m.SelectableFields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -8825,6 +9025,88 @@ func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { } return nil } +func (m *SelectableField) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelectableField: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelectableField: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JSONPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JSONPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || 
(iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ServiceReference) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto index b8477322b70..62d7a33dcd1 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto @@ -40,6 +40,7 @@ message ConversionRequest { optional string desiredAPIVersion = 2; // objects is the list of custom resource objects to be converted. + // +listType=atomic repeated k8s.io.apimachinery.pkg.runtime.RawExtension objects = 3; } @@ -53,6 +54,7 @@ message ConversionResponse { // The webhook is expected to set `apiVersion` of these objects to the `request.desiredAPIVersion`. The list // must also have the same size as the input list with the same objects in the same order (equal kind, metadata.uid, metadata.name and metadata.namespace). // The webhook is allowed to mutate labels and annotations. Any other change to the metadata is silently ignored. + // +listType=atomic repeated k8s.io.apimachinery.pkg.runtime.RawExtension convertedObjects = 2; // result contains the result of conversion with extra details if the conversion failed. `result.status` determines if @@ -125,6 +127,7 @@ message CustomResourceConversion { // include any versions known to the API Server, calls to the webhook will fail. // Defaults to `["v1beta1"]`. // +optional + // +listType=atomic repeated string conversionReviewVersions = 3; } @@ -194,6 +197,7 @@ message CustomResourceDefinitionNames { // and used by clients to support invocations like `kubectl get `. // It must be all lowercase. // +optional + // +listType=atomic repeated string shortNames = 3; // kind is the serialized kind of the resource. It is normally CamelCase and singular. @@ -208,6 +212,7 @@ message CustomResourceDefinitionNames { // This is published in API discovery documents, and used by clients to support invocations like // `kubectl get all`. // +optional + // +listType=atomic repeated string categories = 6; } @@ -256,6 +261,7 @@ message CustomResourceDefinitionSpec { // major version, then minor version. An example sorted list of versions: // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. // +optional + // +listType=atomic repeated CustomResourceDefinitionVersion versions = 7; // additionalPrinterColumns specifies additional columns returned in Table output. @@ -264,8 +270,17 @@ message CustomResourceDefinitionSpec { // Top-level and per-version columns are mutually exclusive. // If no top-level or per-version columns are specified, a single column displaying the age of the custom resource is used. // +optional + // +listType=atomic repeated CustomResourceColumnDefinition additionalPrinterColumns = 8; + // selectableFields specifies paths to fields that may be used as field selectors. + // See https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors + // + // +featureGate=CustomResourceFieldSelectors + // +optional + // +listType=atomic + repeated SelectableField selectableFields = 11; + // conversion defines conversion settings for the CRD. 
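The selectableFields entries added above expose declared CRD paths to the API server's field-selector machinery (gated by CustomResourceFieldSelectors). As a rough illustration of how a client would consume this — not part of the patch; the group/version/resource "example.com/v1 widgets" and the spec.color path are made-up placeholders — a dynamic-client list call could filter on such a field once the CRD declares it under selectableFields:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a client config from a local kubeconfig (placeholder path).
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	client, err := dynamic.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Hypothetical CRD that lists spec.color under selectableFields.
	gvr := schema.GroupVersionResource{Group: "example.com", Version: "v1", Resource: "widgets"}

	// The API server evaluates the declared jsonPath against each object and
	// matches it against the field selector, like built-in field selectors.
	list, err := client.Resource(gvr).Namespace("default").List(context.TODO(), metav1.ListOptions{
		FieldSelector: "spec.color=blue",
	})
	if err != nil {
		panic(err)
	}
	for _, item := range list.Items {
		fmt.Println(item.GetName())
	}
}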
// +optional optional CustomResourceConversion conversion = 9; @@ -302,6 +317,7 @@ message CustomResourceDefinitionStatus { // versions from this list. // Versions may not be removed from `spec.versions` while they exist in this list. // +optional + // +listType=atomic repeated string storedVersions = 3; } @@ -349,7 +365,16 @@ message CustomResourceDefinitionVersion { // Per-version columns must not all be set to identical values (top-level columns should be used instead). // If no top-level or per-version columns are specified, a single column displaying the age of the custom resource is used. // +optional + // +listType=atomic repeated CustomResourceColumnDefinition additionalPrinterColumns = 6; + + // selectableFields specifies paths to fields that may be used as field selectors. + // See https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors + // + // +featureGate=CustomResourceFieldSelectors + // +optional + // +listType=atomic + repeated SelectableField selectableFields = 9; } // CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources. @@ -491,20 +516,25 @@ message JSONSchemaProps { optional double multipleOf = 19; + // +listType=atomic repeated JSON enum = 20; optional int64 maxProperties = 21; optional int64 minProperties = 22; + // +listType=atomic repeated string required = 23; optional JSONSchemaPropsOrArray items = 24; + // +listType=atomic repeated JSONSchemaProps allOf = 25; + // +listType=atomic repeated JSONSchemaProps oneOf = 26; + // +listType=atomic repeated JSONSchemaProps anyOf = 27; optional JSONSchemaProps not = 28; @@ -570,6 +600,7 @@ message JSONSchemaProps { // to ensure those properties are present for all list items. // // +optional + // +listType=atomic repeated string xKubernetesListMapKeys = 41; // x-kubernetes-list-type annotates an array to further describe its topology. @@ -616,6 +647,7 @@ message JSONSchemaProps { message JSONSchemaPropsOrArray { optional JSONSchemaProps schema = 1; + // +listType=atomic repeated JSONSchemaProps jSONSchemas = 2; } @@ -631,9 +663,23 @@ message JSONSchemaPropsOrBool { message JSONSchemaPropsOrStringArray { optional JSONSchemaProps schema = 1; + // +listType=atomic repeated string property = 2; } +// SelectableField specifies the JSON path of a field that may be used with field selectors. +message SelectableField { + // jsonPath is a simple JSON path which is evaluated against each custom resource to produce a + // field selector value. + // Only JSON paths without the array notation are allowed. + // Must point to a field of type string, boolean or integer. Types with enum values + // and strings with formats are allowed. + // If jsonPath refers to absent field in a resource, the jsonPath evaluates to an empty string. + // Must not point to metdata fields. + // Required. + optional string jsonPath = 1; +} + // ServiceReference holds a reference to Service.legacy.k8s.io message ServiceReference { // namespace is the namespace of the service. diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go index db445b10d2b..153f7233775 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go @@ -76,6 +76,7 @@ type CustomResourceDefinitionSpec struct { // major version, then minor version. 
An example sorted list of versions: // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. // +optional + // +listType=atomic Versions []CustomResourceDefinitionVersion `json:"versions,omitempty" protobuf:"bytes,7,rep,name=versions"` // additionalPrinterColumns specifies additional columns returned in Table output. // See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. @@ -83,8 +84,17 @@ type CustomResourceDefinitionSpec struct { // Top-level and per-version columns are mutually exclusive. // If no top-level or per-version columns are specified, a single column displaying the age of the custom resource is used. // +optional + // +listType=atomic AdditionalPrinterColumns []CustomResourceColumnDefinition `json:"additionalPrinterColumns,omitempty" protobuf:"bytes,8,rep,name=additionalPrinterColumns"` + // selectableFields specifies paths to fields that may be used as field selectors. + // See https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors + // + // +featureGate=CustomResourceFieldSelectors + // +optional + // +listType=atomic + SelectableFields []SelectableField `json:"selectableFields,omitempty" protobuf:"bytes,11,rep,name=selectableFields"` + // conversion defines conversion settings for the CRD. // +optional Conversion *CustomResourceConversion `json:"conversion,omitempty" protobuf:"bytes,9,opt,name=conversion"` @@ -122,6 +132,7 @@ type CustomResourceConversion struct { // include any versions known to the API Server, calls to the webhook will fail. // Defaults to `["v1beta1"]`. // +optional + // +listType=atomic ConversionReviewVersions []string `json:"conversionReviewVersions,omitempty" protobuf:"bytes,3,rep,name=conversionReviewVersions"` } @@ -227,7 +238,29 @@ type CustomResourceDefinitionVersion struct { // Per-version columns must not all be set to identical values (top-level columns should be used instead). // If no top-level or per-version columns are specified, a single column displaying the age of the custom resource is used. // +optional + // +listType=atomic AdditionalPrinterColumns []CustomResourceColumnDefinition `json:"additionalPrinterColumns,omitempty" protobuf:"bytes,6,rep,name=additionalPrinterColumns"` + + // selectableFields specifies paths to fields that may be used as field selectors. + // See https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors + // + // +featureGate=CustomResourceFieldSelectors + // +optional + // +listType=atomic + SelectableFields []SelectableField `json:"selectableFields,omitempty" protobuf:"bytes,9,rep,name=selectableFields"` +} + +// SelectableField specifies the JSON path of a field that may be used with field selectors. +type SelectableField struct { + // jsonPath is a simple JSON path which is evaluated against each custom resource to produce a + // field selector value. + // Only JSON paths without the array notation are allowed. + // Must point to a field of type string, boolean or integer. Types with enum values + // and strings with formats are allowed. + // If jsonPath refers to absent field in a resource, the jsonPath evaluates to an empty string. + // Must not point to metdata fields. + // Required. + JSONPath string `json:"jsonPath" protobuf:"bytes,1,opt,name=jsonPath"` } // CustomResourceColumnDefinition specifies a column for server side printing. @@ -269,6 +302,7 @@ type CustomResourceDefinitionNames struct { // and used by clients to support invocations like `kubectl get `. 
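The jsonPath contract spelled out for SelectableField above (simple dot-separated paths, no array notation, absent fields evaluating to an empty string, values limited to strings, booleans and integers) can be pictured with a small standalone helper. This is an editorial sketch of the described behaviour under those assumptions, not the apiserver's actual evaluator:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// evalSimpleJSONPath walks a decoded custom resource (map form) along a
// dot-separated path such as ".spec.color" and renders the value the way the
// SelectableField contract describes: strings, booleans and integers become
// their string form, and an absent field yields "".
func evalSimpleJSONPath(obj map[string]interface{}, path string) string {
	cur := interface{}(obj)
	for _, part := range strings.Split(strings.TrimPrefix(path, "."), ".") {
		m, ok := cur.(map[string]interface{})
		if !ok {
			return ""
		}
		cur, ok = m[part]
		if !ok {
			return ""
		}
	}
	switch v := cur.(type) {
	case string:
		return v
	case bool:
		return strconv.FormatBool(v)
	case int64:
		return strconv.FormatInt(v, 10)
	case float64: // JSON numbers decode to float64; only integral values are expected here
		return strconv.FormatInt(int64(v), 10)
	default:
		return ""
	}
}

func main() {
	obj := map[string]interface{}{
		"spec": map[string]interface{}{"color": "blue"},
	}
	fmt.Println(evalSimpleJSONPath(obj, ".spec.color")) // "blue"
	fmt.Println(evalSimpleJSONPath(obj, ".spec.size"))  // "" (absent field)
}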
// It must be all lowercase. // +optional + // +listType=atomic ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,3,opt,name=shortNames"` // kind is the serialized kind of the resource. It is normally CamelCase and singular. // Custom resource instances will use this value as the `kind` attribute in API calls. @@ -280,6 +314,7 @@ type CustomResourceDefinitionNames struct { // This is published in API discovery documents, and used by clients to support invocations like // `kubectl get all`. // +optional + // +listType=atomic Categories []string `json:"categories,omitempty" protobuf:"bytes,6,rep,name=categories"` } @@ -377,6 +412,7 @@ type CustomResourceDefinitionStatus struct { // versions from this list. // Versions may not be removed from `spec.versions` while they exist in this list. // +optional + // +listType=atomic StoredVersions []string `json:"storedVersions" protobuf:"bytes,3,rep,name=storedVersions"` } @@ -509,6 +545,7 @@ type ConversionRequest struct { // desiredAPIVersion is the version to convert given objects to. e.g. "myapi.example.com/v1" DesiredAPIVersion string `json:"desiredAPIVersion" protobuf:"bytes,2,name=desiredAPIVersion"` // objects is the list of custom resource objects to be converted. + // +listType=atomic Objects []runtime.RawExtension `json:"objects" protobuf:"bytes,3,rep,name=objects"` } @@ -521,6 +558,7 @@ type ConversionResponse struct { // The webhook is expected to set `apiVersion` of these objects to the `request.desiredAPIVersion`. The list // must also have the same size as the input list with the same objects in the same order (equal kind, metadata.uid, metadata.name and metadata.namespace). // The webhook is allowed to mutate labels and annotations. Any other change to the metadata is silently ignored. + // +listType=atomic ConvertedObjects []runtime.RawExtension `json:"convertedObjects" protobuf:"bytes,2,rep,name=convertedObjects"` // result contains the result of conversion with extra details if the conversion failed. `result.status` determines if // the conversion failed or succeeded. The `result.status` field is required and represents the success or failure of the diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go index 24c45bb04eb..86013e39f7c 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go @@ -76,25 +76,30 @@ type JSONSchemaProps struct { // default is a default value for undefined object fields. // Defaulting is a beta feature under the CustomResourceDefaulting feature gate. // CustomResourceDefinitions with defaults must be created using the v1 (or newer) CustomResourceDefinition API. 
- Default *JSON `json:"default,omitempty" protobuf:"bytes,8,opt,name=default"` - Maximum *float64 `json:"maximum,omitempty" protobuf:"bytes,9,opt,name=maximum"` - ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty" protobuf:"bytes,10,opt,name=exclusiveMaximum"` - Minimum *float64 `json:"minimum,omitempty" protobuf:"bytes,11,opt,name=minimum"` - ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty" protobuf:"bytes,12,opt,name=exclusiveMinimum"` - MaxLength *int64 `json:"maxLength,omitempty" protobuf:"bytes,13,opt,name=maxLength"` - MinLength *int64 `json:"minLength,omitempty" protobuf:"bytes,14,opt,name=minLength"` - Pattern string `json:"pattern,omitempty" protobuf:"bytes,15,opt,name=pattern"` - MaxItems *int64 `json:"maxItems,omitempty" protobuf:"bytes,16,opt,name=maxItems"` - MinItems *int64 `json:"minItems,omitempty" protobuf:"bytes,17,opt,name=minItems"` - UniqueItems bool `json:"uniqueItems,omitempty" protobuf:"bytes,18,opt,name=uniqueItems"` - MultipleOf *float64 `json:"multipleOf,omitempty" protobuf:"bytes,19,opt,name=multipleOf"` - Enum []JSON `json:"enum,omitempty" protobuf:"bytes,20,rep,name=enum"` - MaxProperties *int64 `json:"maxProperties,omitempty" protobuf:"bytes,21,opt,name=maxProperties"` - MinProperties *int64 `json:"minProperties,omitempty" protobuf:"bytes,22,opt,name=minProperties"` - Required []string `json:"required,omitempty" protobuf:"bytes,23,rep,name=required"` - Items *JSONSchemaPropsOrArray `json:"items,omitempty" protobuf:"bytes,24,opt,name=items"` - AllOf []JSONSchemaProps `json:"allOf,omitempty" protobuf:"bytes,25,rep,name=allOf"` - OneOf []JSONSchemaProps `json:"oneOf,omitempty" protobuf:"bytes,26,rep,name=oneOf"` + Default *JSON `json:"default,omitempty" protobuf:"bytes,8,opt,name=default"` + Maximum *float64 `json:"maximum,omitempty" protobuf:"bytes,9,opt,name=maximum"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty" protobuf:"bytes,10,opt,name=exclusiveMaximum"` + Minimum *float64 `json:"minimum,omitempty" protobuf:"bytes,11,opt,name=minimum"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty" protobuf:"bytes,12,opt,name=exclusiveMinimum"` + MaxLength *int64 `json:"maxLength,omitempty" protobuf:"bytes,13,opt,name=maxLength"` + MinLength *int64 `json:"minLength,omitempty" protobuf:"bytes,14,opt,name=minLength"` + Pattern string `json:"pattern,omitempty" protobuf:"bytes,15,opt,name=pattern"` + MaxItems *int64 `json:"maxItems,omitempty" protobuf:"bytes,16,opt,name=maxItems"` + MinItems *int64 `json:"minItems,omitempty" protobuf:"bytes,17,opt,name=minItems"` + UniqueItems bool `json:"uniqueItems,omitempty" protobuf:"bytes,18,opt,name=uniqueItems"` + MultipleOf *float64 `json:"multipleOf,omitempty" protobuf:"bytes,19,opt,name=multipleOf"` + // +listType=atomic + Enum []JSON `json:"enum,omitempty" protobuf:"bytes,20,rep,name=enum"` + MaxProperties *int64 `json:"maxProperties,omitempty" protobuf:"bytes,21,opt,name=maxProperties"` + MinProperties *int64 `json:"minProperties,omitempty" protobuf:"bytes,22,opt,name=minProperties"` + // +listType=atomic + Required []string `json:"required,omitempty" protobuf:"bytes,23,rep,name=required"` + Items *JSONSchemaPropsOrArray `json:"items,omitempty" protobuf:"bytes,24,opt,name=items"` + // +listType=atomic + AllOf []JSONSchemaProps `json:"allOf,omitempty" protobuf:"bytes,25,rep,name=allOf"` + // +listType=atomic + OneOf []JSONSchemaProps `json:"oneOf,omitempty" protobuf:"bytes,26,rep,name=oneOf"` + // +listType=atomic AnyOf []JSONSchemaProps `json:"anyOf,omitempty" 
protobuf:"bytes,27,rep,name=anyOf"` Not *JSONSchemaProps `json:"not,omitempty" protobuf:"bytes,28,opt,name=not"` Properties map[string]JSONSchemaProps `json:"properties,omitempty" protobuf:"bytes,29,rep,name=properties"` @@ -150,6 +155,7 @@ type JSONSchemaProps struct { // to ensure those properties are present for all list items. // // +optional + // +listType=atomic XListMapKeys []string `json:"x-kubernetes-list-map-keys,omitempty" protobuf:"bytes,41,rep,name=xKubernetesListMapKeys"` // x-kubernetes-list-type annotates an array to further describe its topology. @@ -343,7 +349,8 @@ type JSONSchemaURL string // JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps // or an array of JSONSchemaProps. Mainly here for serialization purposes. type JSONSchemaPropsOrArray struct { - Schema *JSONSchemaProps `protobuf:"bytes,1,opt,name=schema"` + Schema *JSONSchemaProps `protobuf:"bytes,1,opt,name=schema"` + // +listType=atomic JSONSchemas []JSONSchemaProps `protobuf:"bytes,2,rep,name=jSONSchemas"` } @@ -385,8 +392,9 @@ type JSONSchemaDependencies map[string]JSONSchemaPropsOrStringArray // JSONSchemaPropsOrStringArray represents a JSONSchemaProps or a string array. type JSONSchemaPropsOrStringArray struct { - Schema *JSONSchemaProps `protobuf:"bytes,1,opt,name=schema"` - Property []string `protobuf:"bytes,2,rep,name=property"` + Schema *JSONSchemaProps `protobuf:"bytes,1,opt,name=schema"` + // +listType=atomic + Property []string `protobuf:"bytes,2,rep,name=property"` } // OpenAPISchemaType is used by the kube-openapi generator when constructing diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go index fa6e0ef24e7..d59274e8da6 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go @@ -212,6 +212,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*SelectableField)(nil), (*apiextensions.SelectableField)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_SelectableField_To_apiextensions_SelectableField(a.(*SelectableField), b.(*apiextensions.SelectableField), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.SelectableField)(nil), (*SelectableField)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_SelectableField_To_v1beta1_SelectableField(a.(*apiextensions.SelectableField), b.(*SelectableField), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*ServiceReference)(nil), (*apiextensions.ServiceReference)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_ServiceReference_To_apiextensions_ServiceReference(a.(*ServiceReference), b.(*apiextensions.ServiceReference), scope) }); err != nil { @@ -491,6 +501,7 @@ func autoConvert_v1beta1_CustomResourceDefinitionSpec_To_apiextensions_CustomRes out.Versions = nil } out.AdditionalPrinterColumns = *(*[]apiextensions.CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns)) + out.SelectableFields = *(*[]apiextensions.SelectableField)(unsafe.Pointer(&in.SelectableFields)) if in.Conversion != nil { in, out := &in.Conversion, &out.Conversion *out = 
new(apiextensions.CustomResourceConversion) @@ -538,6 +549,7 @@ func autoConvert_apiextensions_CustomResourceDefinitionSpec_To_v1beta1_CustomRes out.Versions = nil } out.AdditionalPrinterColumns = *(*[]CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns)) + out.SelectableFields = *(*[]SelectableField)(unsafe.Pointer(&in.SelectableFields)) if in.Conversion != nil { in, out := &in.Conversion, &out.Conversion *out = new(CustomResourceConversion) @@ -601,6 +613,7 @@ func autoConvert_v1beta1_CustomResourceDefinitionVersion_To_apiextensions_Custom } out.Subresources = (*apiextensions.CustomResourceSubresources)(unsafe.Pointer(in.Subresources)) out.AdditionalPrinterColumns = *(*[]apiextensions.CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns)) + out.SelectableFields = *(*[]apiextensions.SelectableField)(unsafe.Pointer(&in.SelectableFields)) return nil } @@ -626,6 +639,7 @@ func autoConvert_apiextensions_CustomResourceDefinitionVersion_To_v1beta1_Custom } out.Subresources = (*CustomResourceSubresources)(unsafe.Pointer(in.Subresources)) out.AdditionalPrinterColumns = *(*[]CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns)) + out.SelectableFields = *(*[]SelectableField)(unsafe.Pointer(&in.SelectableFields)) return nil } @@ -1273,6 +1287,26 @@ func Convert_apiextensions_JSONSchemaPropsOrStringArray_To_v1beta1_JSONSchemaPro return autoConvert_apiextensions_JSONSchemaPropsOrStringArray_To_v1beta1_JSONSchemaPropsOrStringArray(in, out, s) } +func autoConvert_v1beta1_SelectableField_To_apiextensions_SelectableField(in *SelectableField, out *apiextensions.SelectableField, s conversion.Scope) error { + out.JSONPath = in.JSONPath + return nil +} + +// Convert_v1beta1_SelectableField_To_apiextensions_SelectableField is an autogenerated conversion function. +func Convert_v1beta1_SelectableField_To_apiextensions_SelectableField(in *SelectableField, out *apiextensions.SelectableField, s conversion.Scope) error { + return autoConvert_v1beta1_SelectableField_To_apiextensions_SelectableField(in, out, s) +} + +func autoConvert_apiextensions_SelectableField_To_v1beta1_SelectableField(in *apiextensions.SelectableField, out *SelectableField, s conversion.Scope) error { + out.JSONPath = in.JSONPath + return nil +} + +// Convert_apiextensions_SelectableField_To_v1beta1_SelectableField is an autogenerated conversion function. 
+func Convert_apiextensions_SelectableField_To_v1beta1_SelectableField(in *apiextensions.SelectableField, out *SelectableField, s conversion.Scope) error { + return autoConvert_apiextensions_SelectableField_To_v1beta1_SelectableField(in, out, s) +} + func autoConvert_v1beta1_ServiceReference_To_apiextensions_ServiceReference(in *ServiceReference, out *apiextensions.ServiceReference, s conversion.Scope) error { out.Namespace = in.Namespace out.Name = in.Name diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go index bb8ab06cb95..18740925c35 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go @@ -279,6 +279,11 @@ func (in *CustomResourceDefinitionSpec) DeepCopyInto(out *CustomResourceDefiniti *out = make([]CustomResourceColumnDefinition, len(*in)) copy(*out, *in) } + if in.SelectableFields != nil { + in, out := &in.SelectableFields, &out.SelectableFields + *out = make([]SelectableField, len(*in)) + copy(*out, *in) + } if in.Conversion != nil { in, out := &in.Conversion, &out.Conversion *out = new(CustomResourceConversion) @@ -354,6 +359,11 @@ func (in *CustomResourceDefinitionVersion) DeepCopyInto(out *CustomResourceDefin *out = make([]CustomResourceColumnDefinition, len(*in)) copy(*out, *in) } + if in.SelectableFields != nil { + in, out := &in.SelectableFields, &out.SelectableFields + *out = make([]SelectableField, len(*in)) + copy(*out, *in) + } return } @@ -610,6 +620,22 @@ func (in *JSONSchemaPropsOrStringArray) DeepCopy() *JSONSchemaPropsOrStringArray return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelectableField) DeepCopyInto(out *SelectableField) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelectableField. +func (in *SelectableField) DeepCopy() *SelectableField { + if in == nil { + return nil + } + out := new(SelectableField) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { *out = *in diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go index b5e5c35c550..3be35f30858 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go @@ -197,6 +197,11 @@ func (in *CustomResourceDefinitionSpec) DeepCopyInto(out *CustomResourceDefiniti *out = make([]CustomResourceColumnDefinition, len(*in)) copy(*out, *in) } + if in.SelectableFields != nil { + in, out := &in.SelectableFields, &out.SelectableFields + *out = make([]SelectableField, len(*in)) + copy(*out, *in) + } if in.Conversion != nil { in, out := &in.Conversion, &out.Conversion *out = new(CustomResourceConversion) @@ -272,6 +277,11 @@ func (in *CustomResourceDefinitionVersion) DeepCopyInto(out *CustomResourceDefin *out = make([]CustomResourceColumnDefinition, len(*in)) copy(*out, *in) } + if in.SelectableFields != nil { + in, out := &in.SelectableFields, &out.SelectableFields + *out = make([]SelectableField, len(*in)) + copy(*out, *in) + } return } @@ -507,6 +517,22 @@ func (in *JSONSchemaPropsOrStringArray) DeepCopy() *JSONSchemaPropsOrStringArray return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelectableField) DeepCopyInto(out *SelectableField) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelectableField. +func (in *SelectableField) DeepCopy() *SelectableField { + if in == nil { + return nil + } + out := new(SelectableField) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { *out = *in diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionversion.go index 1019b03e9dc..aaf2a139cfa 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionversion.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionversion.go @@ -29,6 +29,7 @@ type CustomResourceDefinitionVersionApplyConfiguration struct { Schema *CustomResourceValidationApplyConfiguration `json:"schema,omitempty"` Subresources *CustomResourceSubresourcesApplyConfiguration `json:"subresources,omitempty"` AdditionalPrinterColumns []CustomResourceColumnDefinitionApplyConfiguration `json:"additionalPrinterColumns,omitempty"` + SelectableFields []SelectableFieldApplyConfiguration `json:"selectableFields,omitempty"` } // CustomResourceDefinitionVersionApplyConfiguration constructs an declarative configuration of the CustomResourceDefinitionVersion type for use with @@ -105,3 +106,16 @@ func (b *CustomResourceDefinitionVersionApplyConfiguration) WithAdditionalPrinte } return b } + +// WithSelectableFields adds the given value to the SelectableFields field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, values provided by each call will be appended to the SelectableFields field. +func (b *CustomResourceDefinitionVersionApplyConfiguration) WithSelectableFields(values ...*SelectableFieldApplyConfiguration) *CustomResourceDefinitionVersionApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithSelectableFields") + } + b.SelectableFields = append(b.SelectableFields, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/selectablefield.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/selectablefield.go new file mode 100644 index 00000000000..876dfa71c77 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/selectablefield.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// SelectableFieldApplyConfiguration represents an declarative configuration of the SelectableField type for use +// with apply. +type SelectableFieldApplyConfiguration struct { + JSONPath *string `json:"jsonPath,omitempty"` +} + +// SelectableFieldApplyConfiguration constructs an declarative configuration of the SelectableField type for use with +// apply. +func SelectableField() *SelectableFieldApplyConfiguration { + return &SelectableFieldApplyConfiguration{} +} + +// WithJSONPath sets the JSONPath field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the JSONPath field is set to the value of the last call. 
+func (b *SelectableFieldApplyConfiguration) WithJSONPath(value string) *SelectableFieldApplyConfiguration { + b.JSONPath = &value + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionspec.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionspec.go index f8c2903757b..49f4e433c74 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionspec.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionspec.go @@ -33,6 +33,7 @@ type CustomResourceDefinitionSpecApplyConfiguration struct { Subresources *CustomResourceSubresourcesApplyConfiguration `json:"subresources,omitempty"` Versions []CustomResourceDefinitionVersionApplyConfiguration `json:"versions,omitempty"` AdditionalPrinterColumns []CustomResourceColumnDefinitionApplyConfiguration `json:"additionalPrinterColumns,omitempty"` + SelectableFields []SelectableFieldApplyConfiguration `json:"selectableFields,omitempty"` Conversion *CustomResourceConversionApplyConfiguration `json:"conversion,omitempty"` PreserveUnknownFields *bool `json:"preserveUnknownFields,omitempty"` } @@ -117,6 +118,19 @@ func (b *CustomResourceDefinitionSpecApplyConfiguration) WithAdditionalPrinterCo return b } +// WithSelectableFields adds the given value to the SelectableFields field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the SelectableFields field. +func (b *CustomResourceDefinitionSpecApplyConfiguration) WithSelectableFields(values ...*SelectableFieldApplyConfiguration) *CustomResourceDefinitionSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithSelectableFields") + } + b.SelectableFields = append(b.SelectableFields, *values[i]) + } + return b +} + // WithConversion sets the Conversion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Conversion field is set to the value of the last call. 
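For orientation, a minimal sketch of how the new SelectableField apply-configuration builders compose, using only the declarations added in these vendored v1beta1 applyconfiguration files (the import path matches the vendor directory; the .spec.color/.spec.size paths and the surrounding main package are hypothetical): SelectableField().WithJSONPath sets the jsonPath, and WithSelectableFields appends each entry to the spec's selectableFields, which the CustomResourceFieldSelectors feature gate then exposes as custom-resource field selectors.

package main

import (
	"fmt"

	v1beta1 "k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1"
)

func main() {
	// Start from an empty spec apply configuration and append two
	// selectable fields; the builders mirror the hunks in these files.
	spec := &v1beta1.CustomResourceDefinitionSpecApplyConfiguration{}
	spec.WithSelectableFields(
		v1beta1.SelectableField().WithJSONPath(".spec.color"), // hypothetical field path
		v1beta1.SelectableField().WithJSONPath(".spec.size"),  // hypothetical field path
	)
	for _, f := range spec.SelectableFields {
		fmt.Println(*f.JSONPath) // prints .spec.color then .spec.size
	}
}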
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionversion.go index 605a9f0a3fc..e110a1ec5bb 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionversion.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionversion.go @@ -29,6 +29,7 @@ type CustomResourceDefinitionVersionApplyConfiguration struct { Schema *CustomResourceValidationApplyConfiguration `json:"schema,omitempty"` Subresources *CustomResourceSubresourcesApplyConfiguration `json:"subresources,omitempty"` AdditionalPrinterColumns []CustomResourceColumnDefinitionApplyConfiguration `json:"additionalPrinterColumns,omitempty"` + SelectableFields []SelectableFieldApplyConfiguration `json:"selectableFields,omitempty"` } // CustomResourceDefinitionVersionApplyConfiguration constructs an declarative configuration of the CustomResourceDefinitionVersion type for use with @@ -105,3 +106,16 @@ func (b *CustomResourceDefinitionVersionApplyConfiguration) WithAdditionalPrinte } return b } + +// WithSelectableFields adds the given value to the SelectableFields field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the SelectableFields field. +func (b *CustomResourceDefinitionVersionApplyConfiguration) WithSelectableFields(values ...*SelectableFieldApplyConfiguration) *CustomResourceDefinitionVersionApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithSelectableFields") + } + b.SelectableFields = append(b.SelectableFields, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/selectablefield.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/selectablefield.go new file mode 100644 index 00000000000..8729d9586b6 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/selectablefield.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// SelectableFieldApplyConfiguration represents an declarative configuration of the SelectableField type for use +// with apply. +type SelectableFieldApplyConfiguration struct { + JSONPath *string `json:"jsonPath,omitempty"` +} + +// SelectableFieldApplyConfiguration constructs an declarative configuration of the SelectableField type for use with +// apply. 
+func SelectableField() *SelectableFieldApplyConfiguration { + return &SelectableFieldApplyConfiguration{} +} + +// WithJSONPath sets the JSONPath field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the JSONPath field is set to the value of the last call. +func (b *SelectableFieldApplyConfiguration) WithJSONPath(value string) *SelectableFieldApplyConfiguration { + b.JSONPath = &value + return b +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go index c26eb55dac4..c27ca053b78 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go @@ -492,8 +492,7 @@ func (s sortableStoreElements) Swap(i, j int) { // WaitUntilFreshAndList returns list of pointers to `storeElement` objects along // with their ResourceVersion and the name of the index, if any, that was used. -func (w *watchCache) WaitUntilFreshAndList(ctx context.Context, resourceVersion uint64, matchValues []storage.MatchValue) ([]interface{}, uint64, string, error) { - var err error +func (w *watchCache) WaitUntilFreshAndList(ctx context.Context, resourceVersion uint64, matchValues []storage.MatchValue) (result []interface{}, rv uint64, index string, err error) { if utilfeature.DefaultFeatureGate.Enabled(features.ConsistentListFromCache) && w.notFresh(resourceVersion) { w.waitingUntilFresh.Add() err = w.waitUntilFreshAndBlock(ctx, resourceVersion) @@ -501,12 +500,14 @@ func (w *watchCache) WaitUntilFreshAndList(ctx context.Context, resourceVersion } else { err = w.waitUntilFreshAndBlock(ctx, resourceVersion) } + + defer func() { sort.Sort(sortableStoreElements(result)) }() defer w.RUnlock() if err != nil { - return nil, 0, "", err + return result, rv, index, err } - result, rv, index, err := func() ([]interface{}, uint64, string, error) { + result, rv, index, err = func() ([]interface{}, uint64, string, error) { // This isn't the place where we do "final filtering" - only some "prefiltering" is happening here. So the only // requirement here is to NOT miss anything that should be returned. We can return as many non-matching items as we // want - they will be filtered out later. The fact that we return less things is only further performance improvement. 
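The watch_cache.go hunks in this file convert WaitUntilFreshAndList to named return values and move sort.Sort into a deferred function, so the result is sorted on every return path rather than only on the path that previously reached the explicit sort call. A minimal, self-contained sketch of that pattern (the listItems helper and its data are hypothetical, not apiserver code):

package main

import (
	"errors"
	"fmt"
	"sort"
)

// listItems uses a named result so the deferred sort applies to whatever
// slice is returned, including on the early error return.
func listItems(fail bool) (result []int, err error) {
	defer func() { sort.Ints(result) }()

	result = []int{3, 1, 2}
	if fail {
		return result, errors.New("cache not fresh")
	}
	return result, nil
}

func main() {
	r, err := listItems(true)
	fmt.Println(r, err) // [1 2 3] cache not fresh
}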
@@ -519,7 +520,6 @@ func (w *watchCache) WaitUntilFreshAndList(ctx context.Context, resourceVersion return w.store.List(), w.resourceVersion, "", nil }() - sort.Sort(sortableStoreElements(result)) return result, rv, index, err } diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go index fadc87d53de..1c0307bd913 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go @@ -84,7 +84,7 @@ var ( }, []string{"endpoint"}, ) - storageSizeDescription = compbasemetrics.NewDesc("apiserver_storage_size_bytes", "Size of the storage database file physically allocated in bytes.", []string{"cluster"}, nil, compbasemetrics.ALPHA, "") + storageSizeDescription = compbasemetrics.NewDesc("apiserver_storage_size_bytes", "Size of the storage database file physically allocated in bytes.", []string{"storage_cluster_id"}, nil, compbasemetrics.ALPHA, "") storageMonitor = &monitorCollector{monitorGetter: func() ([]Monitor, error) { return nil, nil }} etcdEventsReceivedCounts = compbasemetrics.NewCounterVec( &compbasemetrics.CounterOpts{ @@ -287,21 +287,21 @@ func (c *monitorCollector) CollectWithStability(ch chan<- compbasemetrics.Metric } for i, m := range monitors { - cluster := fmt.Sprintf("etcd-%d", i) + storageClusterID := fmt.Sprintf("etcd-%d", i) - klog.V(4).InfoS("Start collecting storage metrics", "cluster", cluster) + klog.V(4).InfoS("Start collecting storage metrics", "storage_cluster_id", storageClusterID) ctx, cancel := context.WithTimeout(context.Background(), time.Second) metrics, err := m.Monitor(ctx) cancel() m.Close() if err != nil { - klog.InfoS("Failed to get storage metrics", "cluster", cluster, "err", err) + klog.InfoS("Failed to get storage metrics", "storage_cluster_id", storageClusterID, "err", err) continue } - metric, err := compbasemetrics.NewConstMetric(storageSizeDescription, compbasemetrics.GaugeValue, float64(metrics.Size), cluster) + metric, err := compbasemetrics.NewConstMetric(storageSizeDescription, compbasemetrics.GaugeValue, float64(metrics.Size), storageClusterID) if err != nil { - klog.ErrorS(err, "Failed to create metric", "cluster", cluster) + klog.ErrorS(err, "Failed to create metric", "storage_cluster_id", storageClusterID) } ch <- metric } diff --git a/vendor/k8s.io/utils/trace/trace.go b/vendor/k8s.io/utils/trace/trace.go index 559aebb59a5..187eb5d8c5e 100644 --- a/vendor/k8s.io/utils/trace/trace.go +++ b/vendor/k8s.io/utils/trace/trace.go @@ -192,7 +192,7 @@ func (t *Trace) Log() { t.endTime = &endTime t.lock.Unlock() // an explicit logging request should dump all the steps out at the higher level - if t.parentTrace == nil && klogV(2) { // We don't start logging until Log or LogIfLong is called on the root trace + if t.parentTrace == nil { // We don't start logging until Log or LogIfLong is called on the root trace t.logTrace() } } diff --git a/vendor/knative.dev/pkg/apis/condition_set.go b/vendor/knative.dev/pkg/apis/condition_set.go index 1b110475ffd..940d1b3ab32 100644 --- a/vendor/knative.dev/pkg/apis/condition_set.go +++ b/vendor/knative.dev/pkg/apis/condition_set.go @@ -17,12 +17,12 @@ limitations under the License. 
package apis import ( + "errors" + "fmt" "reflect" "sort" "time" - "fmt" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -234,7 +234,7 @@ func (r conditionsImpl) ClearCondition(t ConditionType) error { } // Terminal conditions are not handled as they can't be nil if r.isTerminal(t) { - return fmt.Errorf("clearing terminal conditions not implemented") + return errors.New("clearing terminal conditions not implemented") } cond := r.GetCondition(t) if cond == nil { diff --git a/vendor/knative.dev/pkg/apis/convert.go b/vendor/knative.dev/pkg/apis/convert.go index d6a28a0727f..72e9b676184 100644 --- a/vendor/knative.dev/pkg/apis/convert.go +++ b/vendor/knative.dev/pkg/apis/convert.go @@ -24,7 +24,6 @@ func ConvertToViaProxy( ctx context.Context, source, proxy, sink Convertible, ) error { - if err := source.ConvertTo(ctx, proxy); err != nil { return err } @@ -38,7 +37,6 @@ func ConvertFromViaProxy( ctx context.Context, source, proxy, sink Convertible, ) error { - if err := proxy.ConvertFrom(ctx, source); err != nil { return err } diff --git a/vendor/knative.dev/pkg/apis/deprecated.go b/vendor/knative.dev/pkg/apis/deprecated.go index 8f07e71b31b..b5dfde2fbb4 100644 --- a/vendor/knative.dev/pkg/apis/deprecated.go +++ b/vendor/knative.dev/pkg/apis/deprecated.go @@ -94,7 +94,7 @@ func getPrefixedNamedFieldValues(prefix string, obj interface{}) (map[string]ref return fields, inlined } - for i := 0; i < objValue.NumField(); i++ { + for i := range objValue.NumField() { tf := objValue.Type().Field(i) if v := objValue.Field(i); v.IsValid() { jTag := tf.Tag.Get("json") diff --git a/vendor/knative.dev/pkg/apis/duck/v1/addressable_types.go b/vendor/knative.dev/pkg/apis/duck/v1/addressable_types.go index a855c38571c..3877683c301 100644 --- a/vendor/knative.dev/pkg/apis/duck/v1/addressable_types.go +++ b/vendor/knative.dev/pkg/apis/duck/v1/addressable_types.go @@ -54,10 +54,8 @@ type Addressable struct { Audience *string `json:"audience,omitempty"` } -var ( - // Addressable is a Convertible type. - _ apis.Convertible = (*Addressable)(nil) -) +// Addressable is a Convertible type. +var _ apis.Convertible = (*Addressable)(nil) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/knative.dev/pkg/apis/duck/v1/auth_types.go b/vendor/knative.dev/pkg/apis/duck/v1/auth_types.go index 3169664b6ce..dfb81cbe621 100644 --- a/vendor/knative.dev/pkg/apis/duck/v1/auth_types.go +++ b/vendor/knative.dev/pkg/apis/duck/v1/auth_types.go @@ -16,10 +16,108 @@ limitations under the License. package v1 +import ( + "context" + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/pkg/apis" + "knative.dev/pkg/apis/duck/ducktypes" + "knative.dev/pkg/kmeta" + "knative.dev/pkg/ptr" +) + +// +genduck + // AuthStatus is meant to provide the generated service account name // in the resource status. type AuthStatus struct { // ServiceAccountName is the name of the generated service account // used for this components OIDC authentication. ServiceAccountName *string `json:"serviceAccountName,omitempty"` + + // ServiceAccountNames is the list of names of the generated service accounts + // used for this components OIDC authentication. This list can have len() > 1, + // when the component uses multiple identities (e.g. in case of a Parallel). 
+ ServiceAccountNames []string `json:"serviceAccountNames,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AuthenticatableType is a skeleton type wrapping AuthStatus in the manner we expect +// resource writers defining compatible resources to embed it. We will +// typically use this type to deserialize AuthenticatableType ObjectReferences and +// access the AuthenticatableType data. This is not a real resource. +type AuthenticatableType struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Status AuthenticatableStatus `json:"status"` +} + +type AuthenticatableStatus struct { + // Auth contains the service account name for the subscription + // +optional + Auth *AuthStatus `json:"auth,omitempty"` +} + +var ( + // AuthStatus is a Convertible type. + _ apis.Convertible = (*AuthStatus)(nil) + + // Verify AuthenticatableType resources meet duck contracts. + _ apis.Listable = (*AuthenticatableType)(nil) + _ ducktypes.Populatable = (*AuthenticatableType)(nil) + _ kmeta.OwnerRefable = (*AuthenticatableType)(nil) +) + +// GetFullType implements duck.Implementable +func (*AuthStatus) GetFullType() ducktypes.Populatable { + return &AuthenticatableType{} +} + +// ConvertTo implements apis.Convertible +func (a *AuthStatus) ConvertTo(_ context.Context, to apis.Convertible) error { + return fmt.Errorf("v1 is the highest known version, got: %T", to) +} + +// ConvertFrom implements apis.Convertible +func (a *AuthStatus) ConvertFrom(_ context.Context, from apis.Convertible) error { + return fmt.Errorf("v1 is the highest known version, got: %T", from) +} + +// Populate implements duck.Populatable +func (t *AuthenticatableType) Populate() { + t.Status = AuthenticatableStatus{ + Auth: &AuthStatus{ + // Populate ALL fields + ServiceAccountName: ptr.String("foo"), + ServiceAccountNames: []string{ + "bar", + "baz", + }, + }, + } +} + +// GetGroupVersionKind implements kmeta.OwnerRefable +func (t *AuthenticatableType) GetGroupVersionKind() schema.GroupVersionKind { + return t.GroupVersionKind() +} + +// GetListType implements apis.Listable +func (*AuthenticatableType) GetListType() runtime.Object { + return &AuthenticatableTypeList{} +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AuthenticatableTypeList is a list of AuthenticatableType resources +type AuthenticatableTypeList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []AuthenticatableType `json:"items"` } diff --git a/vendor/knative.dev/pkg/apis/duck/v1/destination.go b/vendor/knative.dev/pkg/apis/duck/v1/destination.go index 720d4067766..34c1af94f99 100644 --- a/vendor/knative.dev/pkg/apis/duck/v1/destination.go +++ b/vendor/knative.dev/pkg/apis/duck/v1/destination.go @@ -103,11 +103,11 @@ func (d *Destination) SetDefaults(ctx context.Context) { } } -func validateCACerts(CACert *string) *apis.FieldError { +func validateCACerts(caCert *string) *apis.FieldError { // Check the object. 
var errs *apis.FieldError - block, err := pem.Decode([]byte(*CACert)) + block, err := pem.Decode([]byte(*caCert)) if err != nil && block == nil { errs = errs.Also(apis.ErrInvalidValue("CA Cert provided is invalid", "caCert")) return errs diff --git a/vendor/knative.dev/pkg/apis/duck/v1/knative_reference.go b/vendor/knative.dev/pkg/apis/duck/v1/knative_reference.go index 4d03b6b97ce..2609fb1c99c 100644 --- a/vendor/knative.dev/pkg/apis/duck/v1/knative_reference.go +++ b/vendor/knative.dev/pkg/apis/duck/v1/knative_reference.go @@ -102,7 +102,6 @@ func (kr *KReference) Validate(ctx context.Context) *apis.FieldError { Details: fmt.Sprintf("parent namespace: %q does not match ref: %q", parentNS, kr.Namespace), }) } - } } return errs diff --git a/vendor/knative.dev/pkg/apis/duck/v1/kresource_type.go b/vendor/knative.dev/pkg/apis/duck/v1/kresource_type.go index 1f6ee8264e0..9229ba93069 100644 --- a/vendor/knative.dev/pkg/apis/duck/v1/kresource_type.go +++ b/vendor/knative.dev/pkg/apis/duck/v1/kresource_type.go @@ -68,7 +68,7 @@ func (t *KResource) Populate() { // Populate ALL fields Type: "Birthday", Status: corev1.ConditionTrue, - LastTransitionTime: apis.VolatileTime{Inner: metav1.NewTime(time.Date(1984, 02, 28, 18, 52, 00, 00, time.UTC))}, + LastTransitionTime: apis.VolatileTime{Inner: metav1.NewTime(time.Date(1984, 2, 28, 18, 52, 0, 0, time.UTC))}, Reason: "Celebrate", Message: "n3wScott, find your party hat :tada:", }} diff --git a/vendor/knative.dev/pkg/apis/duck/v1/register.go b/vendor/knative.dev/pkg/apis/duck/v1/register.go index fb4348a0585..b7366b66044 100644 --- a/vendor/knative.dev/pkg/apis/duck/v1/register.go +++ b/vendor/knative.dev/pkg/apis/duck/v1/register.go @@ -52,6 +52,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { (&KResource{}).GetListType(), &AddressableType{}, (&AddressableType{}).GetListType(), + &AuthenticatableType{}, + (&AuthenticatableType{}).GetListType(), &Source{}, (&Source{}).GetListType(), &WithPod{}, diff --git a/vendor/knative.dev/pkg/apis/duck/v1/source_types.go b/vendor/knative.dev/pkg/apis/duck/v1/source_types.go index 95b7d1417d1..284ab318dd5 100644 --- a/vendor/knative.dev/pkg/apis/duck/v1/source_types.go +++ b/vendor/knative.dev/pkg/apis/duck/v1/source_types.go @@ -158,7 +158,7 @@ func (s *Source) Populate() { // Populate ALL fields Type: SourceConditionSinkProvided, Status: corev1.ConditionTrue, - LastTransitionTime: apis.VolatileTime{Inner: metav1.NewTime(time.Date(1984, 02, 28, 18, 52, 00, 00, time.UTC))}, + LastTransitionTime: apis.VolatileTime{Inner: metav1.NewTime(time.Date(1984, 2, 28, 18, 52, 0, 0, time.UTC))}, }} s.Status.SinkURI = &apis.URL{ Scheme: "https", diff --git a/vendor/knative.dev/pkg/apis/duck/v1/status_types.go b/vendor/knative.dev/pkg/apis/duck/v1/status_types.go index 15e26453466..2e8d66f93b4 100644 --- a/vendor/knative.dev/pkg/apis/duck/v1/status_types.go +++ b/vendor/knative.dev/pkg/apis/duck/v1/status_types.go @@ -97,7 +97,6 @@ func (s *Status) ConvertTo(ctx context.Context, sink *Status, predicates ...func conditions := make(apis.Conditions, 0, len(s.Conditions)) for _, c := range s.Conditions { - // Copy over the "happy" condition, which is the only condition that // we can reliably transfer. 
if c.Type == apis.ConditionReady || c.Type == apis.ConditionSucceeded { diff --git a/vendor/knative.dev/pkg/apis/duck/v1/zz_generated.deepcopy.go b/vendor/knative.dev/pkg/apis/duck/v1/zz_generated.deepcopy.go index bc3174b2baa..bc263edfd67 100644 --- a/vendor/knative.dev/pkg/apis/duck/v1/zz_generated.deepcopy.go +++ b/vendor/knative.dev/pkg/apis/duck/v1/zz_generated.deepcopy.go @@ -158,6 +158,11 @@ func (in *AuthStatus) DeepCopyInto(out *AuthStatus) { *out = new(string) **out = **in } + if in.ServiceAccountNames != nil { + in, out := &in.ServiceAccountNames, &out.ServiceAccountNames + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -171,6 +176,87 @@ func (in *AuthStatus) DeepCopy() *AuthStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticatableStatus) DeepCopyInto(out *AuthenticatableStatus) { + *out = *in + if in.Auth != nil { + in, out := &in.Auth, &out.Auth + *out = new(AuthStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticatableStatus. +func (in *AuthenticatableStatus) DeepCopy() *AuthenticatableStatus { + if in == nil { + return nil + } + out := new(AuthenticatableStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticatableType) DeepCopyInto(out *AuthenticatableType) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticatableType. +func (in *AuthenticatableType) DeepCopy() *AuthenticatableType { + if in == nil { + return nil + } + out := new(AuthenticatableType) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AuthenticatableType) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticatableTypeList) DeepCopyInto(out *AuthenticatableTypeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AuthenticatableType, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticatableTypeList. +func (in *AuthenticatableTypeList) DeepCopy() *AuthenticatableTypeList { + if in == nil { + return nil + } + out := new(AuthenticatableTypeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AuthenticatableTypeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Binding) DeepCopyInto(out *Binding) { *out = *in diff --git a/vendor/knative.dev/pkg/apis/duck/verify.go b/vendor/knative.dev/pkg/apis/duck/verify.go index 48675f93a47..4e33e65972b 100644 --- a/vendor/knative.dev/pkg/apis/duck/verify.go +++ b/vendor/knative.dev/pkg/apis/duck/verify.go @@ -24,8 +24,10 @@ import ( "knative.dev/pkg/kmp" ) -type Implementable = ducktypes.Implementable -type Populatable = ducktypes.Populatable +type ( + Implementable = ducktypes.Implementable + Populatable = ducktypes.Populatable +) // VerifyType verifies that a particular concrete resource properly implements // the provided Implementable duck type. It is expected that under the resource diff --git a/vendor/knative.dev/pkg/apis/kind2resource.go b/vendor/knative.dev/pkg/apis/kind2resource.go index 37ffe080345..b1937086e6f 100644 --- a/vendor/knative.dev/pkg/apis/kind2resource.go +++ b/vendor/knative.dev/pkg/apis/kind2resource.go @@ -17,7 +17,6 @@ limitations under the License. package apis import ( - "fmt" "strings" "k8s.io/apimachinery/pkg/runtime/schema" @@ -41,7 +40,7 @@ func KindToResource(gvk schema.GroupVersionKind) schema.GroupVersionResource { func pluralizeKind(kind string) string { ret := strings.ToLower(kind) if strings.HasSuffix(ret, "s") { - return fmt.Sprintf("%ses", ret) + return ret + "es" } - return fmt.Sprintf("%ss", ret) + return ret + "s" } diff --git a/vendor/knative.dev/pkg/kmeta/names.go b/vendor/knative.dev/pkg/kmeta/names.go index 963b121d29f..0977544c8ca 100644 --- a/vendor/knative.dev/pkg/kmeta/names.go +++ b/vendor/knative.dev/pkg/kmeta/names.go @@ -18,6 +18,7 @@ package kmeta import ( "crypto/md5" //nolint:gosec // No strong cryptography needed. + "encoding/hex" "fmt" "regexp" ) @@ -53,7 +54,7 @@ func ChildName(parent, suffix string) string { // Format the return string, if it's shorter than longest: pad with // beginning of the suffix. This happens, for example, when parent is // short, but the suffix is very long. - ret := parent + fmt.Sprintf("%x", h) + ret := parent + hex.EncodeToString(h[:]) if d := longest - len(ret); d > 0 { ret += suffix[:d] } diff --git a/vendor/knative.dev/pkg/ptr/doc.go b/vendor/knative.dev/pkg/ptr/doc.go new file mode 100644 index 00000000000..1ebcea2845f --- /dev/null +++ b/vendor/knative.dev/pkg/ptr/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package ptr holds utilities for taking pointer references to values. +package ptr diff --git a/vendor/knative.dev/pkg/ptr/ptr.go b/vendor/knative.dev/pkg/ptr/ptr.go new file mode 100644 index 00000000000..6d5eeaab69d --- /dev/null +++ b/vendor/knative.dev/pkg/ptr/ptr.go @@ -0,0 +1,67 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ptr + +import "time" + +// Int32 is a helper for turning integers into pointers for use in +// API types that want *int32. +func Int32(i int32) *int32 { + return &i +} + +// Int64 is a helper for turning integers into pointers for use in +// API types that want *int64. +func Int64(i int64) *int64 { + return &i +} + +// Float32 is a helper for turning floats into pointers for use in +// API types that want *float32. +func Float32(f float32) *float32 { + return &f +} + +// Float64 is a helper for turning floats into pointers for use in +// API types that want *float64. +func Float64(f float64) *float64 { + return &f +} + +// Bool is a helper for turning bools into pointers for use in +// API types that want *bool. +func Bool(b bool) *bool { + return &b +} + +// String is a helper for turning strings into pointers for use in +// API types that want *string. +func String(s string) *string { + return &s +} + +// Duration is a helper for turning time.Duration into pointers for use in +// API types that want *time.Duration. +func Duration(t time.Duration) *time.Duration { + return &t +} + +// Time is a helper for turning a const time.Time into a pointer for use in +// API types that want *time.Duration. +func Time(t time.Time) *time.Time { + return &t +} diff --git a/vendor/knative.dev/pkg/ptr/value.go b/vendor/knative.dev/pkg/ptr/value.go new file mode 100644 index 00000000000..148bf2105ca --- /dev/null +++ b/vendor/knative.dev/pkg/ptr/value.go @@ -0,0 +1,91 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ptr + +import "time" + +// Int32Value is a helper for turning pointers to integers into values for use +// in API types that want int32. +func Int32Value(i *int32) int32 { + if i == nil { + return 0 + } + return *i +} + +// Int64Value is a helper for turning pointers to integers into values for use +// in API types that want int64. +func Int64Value(i *int64) int64 { + if i == nil { + return 0 + } + return *i +} + +// Float32Value is a helper for turning pointers to floats into values for use +// in API types that want float32. +func Float32Value(f *float32) float32 { + if f == nil { + return 0 + } + return *f +} + +// Float64Value is a helper for turning pointers to floats into values for use +// in API types that want float64. +func Float64Value(f *float64) float64 { + if f == nil { + return 0 + } + return *f +} + +// BoolValue is a helper for turning pointers to bools into values for use in +// API types that want bool. 
+func BoolValue(b *bool) bool { + if b == nil { + return false + } + return *b +} + +// StringValue is a helper for turning pointers to strings into values for use +// in API types that want string. +func StringValue(s *string) string { + if s == nil { + return "" + } + return *s +} + +// DurationValue is a helper for turning *time.Duration into values for use in +// API types that want time.Duration. +func DurationValue(t *time.Duration) time.Duration { + if t == nil { + return 0 + } + return *t +} + +// TimeValue is a helper for turning *time.Time into values for use in API +// types that want API types that want time.Time. +func TimeValue(t *time.Time) time.Time { + if t == nil { + return time.Time{} + } + return *t +} diff --git a/vendor/knative.dev/pkg/tracker/interface.go b/vendor/knative.dev/pkg/tracker/interface.go index 5e580b34cf9..58bc941d8e4 100644 --- a/vendor/knative.dev/pkg/tracker/interface.go +++ b/vendor/knative.dev/pkg/tracker/interface.go @@ -177,5 +177,4 @@ func (ref *Reference) Validate(ctx context.Context) *apis.FieldError { } return errs - } diff --git a/vendor/modules.txt b/vendor/modules.txt index be5698960af..b2138587ef9 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,11 +1,11 @@ -# cloud.google.com/go v0.113.0 -## explicit; go 1.19 +# cloud.google.com/go v0.115.0 +## explicit; go 1.20 cloud.google.com/go/internal cloud.google.com/go/internal/optional cloud.google.com/go/internal/trace cloud.google.com/go/internal/version -# cloud.google.com/go/auth v0.4.1 -## explicit; go 1.19 +# cloud.google.com/go/auth v0.7.3 +## explicit; go 1.20 cloud.google.com/go/auth cloud.google.com/go/auth/credentials cloud.google.com/go/auth/credentials/internal/externalaccount @@ -20,28 +20,28 @@ cloud.google.com/go/auth/internal/credsfile cloud.google.com/go/auth/internal/jwt cloud.google.com/go/auth/internal/transport cloud.google.com/go/auth/internal/transport/cert -# cloud.google.com/go/auth/oauth2adapt v0.2.2 -## explicit; go 1.19 +# cloud.google.com/go/auth/oauth2adapt v0.2.3 +## explicit; go 1.20 cloud.google.com/go/auth/oauth2adapt -# cloud.google.com/go/compute/metadata v0.3.0 -## explicit; go 1.19 +# cloud.google.com/go/compute/metadata v0.5.0 +## explicit; go 1.20 cloud.google.com/go/compute/metadata -# cloud.google.com/go/iam v1.1.7 -## explicit; go 1.19 +# cloud.google.com/go/iam v1.1.12 +## explicit; go 1.20 cloud.google.com/go/iam cloud.google.com/go/iam/apiv1/iampb -# cloud.google.com/go/monitoring v1.18.2 -## explicit; go 1.19 +# cloud.google.com/go/monitoring v1.20.3 +## explicit; go 1.20 cloud.google.com/go/monitoring/apiv3/v2 cloud.google.com/go/monitoring/apiv3/v2/monitoringpb cloud.google.com/go/monitoring/internal -# cloud.google.com/go/secretmanager v1.12.0 -## explicit; go 1.19 +# cloud.google.com/go/secretmanager v1.13.5 +## explicit; go 1.20 cloud.google.com/go/secretmanager/apiv1 cloud.google.com/go/secretmanager/apiv1/secretmanagerpb cloud.google.com/go/secretmanager/internal -# cloud.google.com/go/storage v1.40.0 -## explicit; go 1.19 +# cloud.google.com/go/storage v1.43.0 +## explicit; go 1.20 cloud.google.com/go/storage cloud.google.com/go/storage/internal cloud.google.com/go/storage/internal/apiv2 @@ -59,7 +59,7 @@ filippo.io/edwards25519/field # github.com/Azure/azure-amqp-common-go/v4 v4.2.0 ## explicit; go 1.18 github.com/Azure/azure-amqp-common-go/v4/auth -# github.com/Azure/azure-kusto-go v0.15.2 +# github.com/Azure/azure-kusto-go v0.16.1 ## explicit; go 1.19 github.com/Azure/azure-kusto-go/kusto 
github.com/Azure/azure-kusto-go/kusto/data/errors @@ -78,7 +78,7 @@ github.com/Azure/azure-kusto-go/kusto/kql github.com/Azure/azure-kusto-go/kusto/trustedendpoints github.com/Azure/azure-kusto-go/kusto/unsafe github.com/Azure/azure-kusto-go/kusto/utils -# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 +# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azcore github.com/Azure/azure-sdk-for-go/sdk/azcore/arm @@ -106,7 +106,7 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azidentity github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal -# github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 +# github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/internal/diag github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo @@ -131,7 +131,7 @@ github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/sas github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/sbauth github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/utils -# github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.0 +# github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.1 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/admin @@ -156,7 +156,7 @@ github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets # github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal -# github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 +# github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob @@ -200,7 +200,7 @@ github.com/Azure/go-autorest/autorest/azure # github.com/Azure/go-autorest/autorest/adal v0.9.23 ## explicit; go 1.15 github.com/Azure/go-autorest/autorest/adal -# github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 +# github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 ## explicit; go 1.15 github.com/Azure/go-autorest/autorest/azure/auth # github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 @@ -261,7 +261,7 @@ github.com/Huawei/gophercloud/openstack/identity/v3/services github.com/Huawei/gophercloud/openstack/identity/v3/tokens github.com/Huawei/gophercloud/openstack/utils github.com/Huawei/gophercloud/pagination -# github.com/IBM/sarama v1.43.1 +# github.com/IBM/sarama v1.43.2 ## explicit; go 1.19 github.com/IBM/sarama # github.com/NYTimes/gziphandler v1.1.1 @@ -291,7 +291,7 @@ github.com/ProtonMail/go-crypto/openpgp/s2k ## explicit; go 1.13 github.com/andybalholm/brotli github.com/andybalholm/brotli/matchfinder -# github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df +# github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230512164433-5d1fd1a340c9 ## explicit; go 1.18 github.com/antlr/antlr4/runtime/Go/antlr/v4 # github.com/apapsch/go-jsonmerge/v2 v2.0.0 @@ -313,7 +313,7 @@ github.com/asaskevich/govalidator # github.com/aws/aws-msk-iam-sasl-signer-go v1.0.0 ## explicit; go 1.17 github.com/aws/aws-msk-iam-sasl-signer-go/signer -# github.com/aws/aws-sdk-go-v2 v1.26.1 +# github.com/aws/aws-sdk-go-v2 v1.30.3 ## explicit; go 
1.20 github.com/aws/aws-sdk-go-v2/aws github.com/aws/aws-sdk-go-v2/aws/defaults @@ -330,8 +330,10 @@ github.com/aws/aws-sdk-go-v2/aws/transport/http github.com/aws/aws-sdk-go-v2/internal/auth github.com/aws/aws-sdk-go-v2/internal/auth/smithy github.com/aws/aws-sdk-go-v2/internal/awsutil +github.com/aws/aws-sdk-go-v2/internal/context github.com/aws/aws-sdk-go-v2/internal/endpoints github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn +github.com/aws/aws-sdk-go-v2/internal/middleware github.com/aws/aws-sdk-go-v2/internal/rand github.com/aws/aws-sdk-go-v2/internal/sdk github.com/aws/aws-sdk-go-v2/internal/sdkio @@ -339,14 +341,14 @@ github.com/aws/aws-sdk-go-v2/internal/shareddefaults github.com/aws/aws-sdk-go-v2/internal/strings github.com/aws/aws-sdk-go-v2/internal/sync/singleflight github.com/aws/aws-sdk-go-v2/internal/timeconv -# github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 +# github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi -# github.com/aws/aws-sdk-go-v2/config v1.27.11 +# github.com/aws/aws-sdk-go-v2/config v1.27.27 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/config -# github.com/aws/aws-sdk-go-v2/credentials v1.17.11 +# github.com/aws/aws-sdk-go-v2/credentials v1.17.27 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/credentials github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds @@ -355,81 +357,81 @@ github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client github.com/aws/aws-sdk-go-v2/credentials/processcreds github.com/aws/aws-sdk-go-v2/credentials/ssocreds github.com/aws/aws-sdk-go-v2/credentials/stscreds -# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 +# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/feature/ec2/imds github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config -# github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 +# github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/internal/configsources -# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 +# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 # github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/internal/ini -# github.com/aws/aws-sdk-go-v2/service/amp v1.25.4 +# github.com/aws/aws-sdk-go-v2/service/amp v1.27.3 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/service/amp github.com/aws/aws-sdk-go-v2/service/amp/internal/endpoints github.com/aws/aws-sdk-go-v2/service/amp/types -# github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.38.0 +# github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.40.3 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/service/cloudwatch github.com/aws/aws-sdk-go-v2/service/cloudwatch/internal/endpoints github.com/aws/aws-sdk-go-v2/service/cloudwatch/types -# github.com/aws/aws-sdk-go-v2/service/dynamodb v1.31.1 +# github.com/aws/aws-sdk-go-v2/service/dynamodb v1.34.4 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/service/dynamodb github.com/aws/aws-sdk-go-v2/service/dynamodb/internal/customizations github.com/aws/aws-sdk-go-v2/service/dynamodb/internal/endpoints github.com/aws/aws-sdk-go-v2/service/dynamodb/types -# github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.20.4 +# 
github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.22.3 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/service/dynamodbstreams github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/internal/endpoints github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/types -# github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 +# github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding -# github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.6 +# github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.16 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery -# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 +# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url -# github.com/aws/aws-sdk-go-v2/service/kinesis v1.27.4 +# github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.3 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/service/kinesis github.com/aws/aws-sdk-go-v2/service/kinesis/internal/customizations github.com/aws/aws-sdk-go-v2/service/kinesis/internal/endpoints github.com/aws/aws-sdk-go-v2/service/kinesis/types -# github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.28.6 +# github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.4 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/service/secretsmanager github.com/aws/aws-sdk-go-v2/service/secretsmanager/internal/endpoints github.com/aws/aws-sdk-go-v2/service/secretsmanager/types -# github.com/aws/aws-sdk-go-v2/service/sqs v1.31.4 +# github.com/aws/aws-sdk-go-v2/service/sqs v1.34.3 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/service/sqs github.com/aws/aws-sdk-go-v2/service/sqs/internal/endpoints github.com/aws/aws-sdk-go-v2/service/sqs/types -# github.com/aws/aws-sdk-go-v2/service/sso v1.20.5 +# github.com/aws/aws-sdk-go-v2/service/sso v1.22.4 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/service/sso github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints github.com/aws/aws-sdk-go-v2/service/sso/types -# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 +# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/service/ssooidc github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints github.com/aws/aws-sdk-go-v2/service/ssooidc/types -# github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 +# github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/service/sts github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints github.com/aws/aws-sdk-go-v2/service/sts/types -# github.com/aws/smithy-go v1.20.2 +# github.com/aws/smithy-go v1.20.3 ## explicit; go 1.20 github.com/aws/smithy-go github.com/aws/smithy-go/auth @@ -460,8 +462,8 @@ github.com/beorn7/perks/quantile # github.com/blang/semver/v4 v4.0.0 ## explicit; go 1.14 github.com/blang/semver/v4 -# github.com/bradleyfalzon/ghinstallation/v2 v2.10.0 -## explicit; go 1.13 +# github.com/bradleyfalzon/ghinstallation/v2 v2.11.0 +## explicit; go 1.16 github.com/bradleyfalzon/ghinstallation/v2 # github.com/cenkalti/backoff/v3 v3.2.2 ## explicit; go 1.12 @@ -535,7 +537,7 @@ github.com/dysnix/predictkube-libs/external/grpc/server github.com/dysnix/predictkube-libs/external/grpc/zstd_compressor github.com/dysnix/predictkube-libs/external/http_transport 
github.com/dysnix/predictkube-libs/external/types_convertation -# github.com/dysnix/predictkube-proto v0.0.0-20220713123213-7135dce1e9c9 +# github.com/dysnix/predictkube-proto v0.0.0-20240703070754-1483183ce065 ## explicit; go 1.16 github.com/dysnix/predictkube-proto/external/proto/commonproto github.com/dysnix/predictkube-proto/external/proto/enums @@ -567,7 +569,7 @@ github.com/evanphx/json-patch ## explicit; go 1.18 github.com/evanphx/json-patch/v5 github.com/evanphx/json-patch/v5/internal/json -# github.com/expr-lang/expr v1.16.5 +# github.com/expr-lang/expr v1.16.9 ## explicit; go 1.18 github.com/expr-lang/expr github.com/expr-lang/expr/ast @@ -618,7 +620,7 @@ github.com/go-kivik/couchdb/v3/chttp github.com/go-kivik/kivik/v3 github.com/go-kivik/kivik/v3/driver github.com/go-kivik/kivik/v3/internal/registry -# github.com/go-logr/logr v1.4.1 +# github.com/go-logr/logr v1.4.2 ## explicit; go 1.18 github.com/go-logr/logr github.com/go-logr/logr/funcr @@ -646,15 +648,15 @@ github.com/go-playground/locales/currency # github.com/go-playground/universal-translator v0.18.1 ## explicit; go 1.18 github.com/go-playground/universal-translator -# github.com/go-playground/validator/v10 v10.19.0 +# github.com/go-playground/validator/v10 v10.22.0 ## explicit; go 1.18 github.com/go-playground/validator/v10 # github.com/go-sql-driver/mysql v1.8.1 ## explicit; go 1.18 github.com/go-sql-driver/mysql -# github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 -## explicit; go 1.13 -github.com/go-task/slim-sprig +# github.com/go-task/slim-sprig/v3 v3.0.0 +## explicit; go 1.20 +github.com/go-task/slim-sprig/v3 # github.com/gobuffalo/flect v1.0.2 ## explicit; go 1.16 github.com/gobuffalo/flect @@ -752,9 +754,9 @@ github.com/google/go-cmp/cmp/internal/value # github.com/google/go-github/v50 v50.2.0 ## explicit; go 1.17 github.com/google/go-github/v50/github -# github.com/google/go-github/v60 v60.0.0 +# github.com/google/go-github/v62 v62.0.0 ## explicit; go 1.21 -github.com/google/go-github/v60/github +github.com/google/go-github/v62/github # github.com/google/go-querystring v1.1.0 ## explicit; go 1.10 github.com/google/go-querystring/query @@ -762,11 +764,11 @@ github.com/google/go-querystring/query ## explicit; go 1.12 github.com/google/gofuzz github.com/google/gofuzz/bytesource -# github.com/google/pprof v0.0.0-20231229205709-960ae82b1e42 +# github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af ## explicit; go 1.19 github.com/google/pprof/profile -# github.com/google/s2a-go v0.1.7 -## explicit; go 1.19 +# github.com/google/s2a-go v0.1.8 +## explicit; go 1.20 github.com/google/s2a-go github.com/google/s2a-go/fallback github.com/google/s2a-go/internal/authinfo @@ -798,14 +800,14 @@ github.com/google/uuid ## explicit; go 1.19 github.com/googleapis/enterprise-certificate-proxy/client github.com/googleapis/enterprise-certificate-proxy/client/util -# github.com/googleapis/gax-go/v2 v2.12.4 -## explicit; go 1.19 +# github.com/googleapis/gax-go/v2 v2.13.0 +## explicit; go 1.20 github.com/googleapis/gax-go/v2 github.com/googleapis/gax-go/v2/apierror github.com/googleapis/gax-go/v2/apierror/internal/proto github.com/googleapis/gax-go/v2/callctx github.com/googleapis/gax-go/v2/internal -# github.com/gophercloud/gophercloud v1.11.0 +# github.com/gophercloud/gophercloud v1.14.0 ## explicit; go 1.14 github.com/gophercloud/gophercloud github.com/gophercloud/gophercloud/openstack @@ -825,17 +827,17 @@ github.com/gorilla/websocket # github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 ## explicit; go 1.14 
github.com/grpc-ecosystem/go-grpc-middleware -# github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0 +# github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1 ## explicit; go 1.19 github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus -# github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.3 -## explicit; go 1.14 +# github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 +## explicit; go 1.19 github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors # github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 ## explicit github.com/grpc-ecosystem/go-grpc-prometheus -# github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 -## explicit; go 1.19 +# github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 +## explicit; go 1.20 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule github.com/grpc-ecosystem/grpc-gateway/v2/runtime github.com/grpc-ecosystem/grpc-gateway/v2/utilities @@ -880,7 +882,7 @@ github.com/hashicorp/hcl/hcl/token github.com/hashicorp/hcl/json/parser github.com/hashicorp/hcl/json/scanner github.com/hashicorp/hcl/json/token -# github.com/hashicorp/vault/api v1.13.0 +# github.com/hashicorp/vault/api v1.14.0 ## explicit; go 1.21 github.com/hashicorp/vault/api # github.com/imdario/mergo v0.3.16 @@ -911,17 +913,16 @@ github.com/jackc/pgpassfile # github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 ## explicit; go 1.14 github.com/jackc/pgservicefile -# github.com/jackc/pgx/v5 v5.5.5 -## explicit; go 1.19 +# github.com/jackc/pgx/v5 v5.6.0 +## explicit; go 1.20 github.com/jackc/pgx/v5 -github.com/jackc/pgx/v5/internal/anynil github.com/jackc/pgx/v5/internal/iobufpool github.com/jackc/pgx/v5/internal/pgio github.com/jackc/pgx/v5/internal/sanitize github.com/jackc/pgx/v5/internal/stmtcache github.com/jackc/pgx/v5/pgconn +github.com/jackc/pgx/v5/pgconn/ctxwatch github.com/jackc/pgx/v5/pgconn/internal/bgreader -github.com/jackc/pgx/v5/pgconn/internal/ctxwatch github.com/jackc/pgx/v5/pgproto3 github.com/jackc/pgx/v5/pgtype github.com/jackc/pgx/v5/pgxpool @@ -1000,7 +1001,7 @@ github.com/jstemmer/go-junit-report/v2/junit github.com/jstemmer/go-junit-report/v2/parser/gotest github.com/jstemmer/go-junit-report/v2/parser/gotest/internal/collector github.com/jstemmer/go-junit-report/v2/parser/gotest/internal/reader -# github.com/klauspost/compress v1.17.7 +# github.com/klauspost/compress v1.17.8 ## explicit; go 1.20 github.com/klauspost/compress github.com/klauspost/compress/flate @@ -1037,9 +1038,9 @@ github.com/mattn/go-isatty # github.com/mattn/go-runewidth v0.0.15 ## explicit; go 1.9 github.com/mattn/go-runewidth -# github.com/matttproud/golang_protobuf_extensions v1.0.4 -## explicit; go 1.9 -github.com/matttproud/golang_protobuf_extensions/pbutil +# github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 +## explicit; go 1.19 +github.com/matttproud/golang_protobuf_extensions/v2/pbutil # github.com/microsoft/ApplicationInsights-Go v0.4.4 ## explicit; go 1.12 github.com/microsoft/ApplicationInsights-Go/appinsights @@ -1140,7 +1141,7 @@ github.com/oapi-codegen/runtime/types # github.com/olekukonko/tablewriter v0.0.5 ## explicit; go 1.12 github.com/olekukonko/tablewriter -# github.com/onsi/ginkgo/v2 v2.17.1 +# github.com/onsi/ginkgo/v2 v2.19.1 ## explicit; go 1.20 github.com/onsi/ginkgo/v2 github.com/onsi/ginkgo/v2/config @@ -1162,7 +1163,7 @@ github.com/onsi/ginkgo/v2/internal/parallel_support github.com/onsi/ginkgo/v2/internal/testingtproxy github.com/onsi/ginkgo/v2/reporters github.com/onsi/ginkgo/v2/types -# 
github.com/onsi/gomega v1.33.0 +# github.com/onsi/gomega v1.34.1 ## explicit; go 1.20 github.com/onsi/gomega github.com/onsi/gomega/format @@ -1174,7 +1175,7 @@ github.com/onsi/gomega/matchers/support/goraph/edge github.com/onsi/gomega/matchers/support/goraph/node github.com/onsi/gomega/matchers/support/goraph/util github.com/onsi/gomega/types -# github.com/open-policy-agent/cert-controller v0.0.0-00010101000000-000000000000 => github.com/jorturfer/cert-controller v0.0.0-20240427003941-363ba56751d7 +# github.com/open-policy-agent/cert-controller v0.10.1 => github.com/jorturfer/cert-controller v0.0.0-20240427003941-363ba56751d7 ## explicit; go 1.20 github.com/open-policy-agent/cert-controller/pkg/rotator # github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 @@ -1196,8 +1197,8 @@ github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 ## explicit github.com/pmezard/go-difflib/difflib -# github.com/prometheus/client_golang v1.19.0 => github.com/prometheus/client_golang v1.16.0 -## explicit; go 1.17 +# github.com/prometheus/client_golang v1.19.1 => github.com/prometheus/client_golang v1.18.0 +## explicit; go 1.19 github.com/prometheus/client_golang/api github.com/prometheus/client_golang/api/prometheus/v1 github.com/prometheus/client_golang/prometheus @@ -1206,27 +1207,28 @@ github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promhttp github.com/prometheus/client_golang/prometheus/testutil github.com/prometheus/client_golang/prometheus/testutil/promlint -# github.com/prometheus/client_model v0.6.1 => github.com/prometheus/client_model v0.4.0 -## explicit; go 1.18 +github.com/prometheus/client_golang/prometheus/testutil/promlint/validations +# github.com/prometheus/client_model v0.6.1 => github.com/prometheus/client_model v0.5.0 +## explicit; go 1.19 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.53.0 => github.com/prometheus/common v0.44.0 -## explicit; go 1.18 +# github.com/prometheus/common v0.55.0 => github.com/prometheus/common v0.45.0 +## explicit; go 1.20 github.com/prometheus/common/config github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg github.com/prometheus/common/model -# github.com/prometheus/procfs v0.14.0 -## explicit; go 1.21 +# github.com/prometheus/procfs v0.15.1 +## explicit; go 1.20 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/rabbitmq/amqp091-go v1.9.0 -## explicit; go 1.16 +# github.com/rabbitmq/amqp091-go v1.10.0 +## explicit; go 1.20 github.com/rabbitmq/amqp091-go # github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 ## explicit github.com/rcrowley/go-metrics -# github.com/redis/go-redis/v9 v9.5.1 +# github.com/redis/go-redis/v9 v9.6.1 ## explicit; go 1.18 github.com/redis/go-redis/v9 github.com/redis/go-redis/v9/internal @@ -1304,6 +1306,9 @@ github.com/segmentio/kafka-go/sasl/scram # github.com/segmentio/kafka-go/sasl/aws_msk_iam_v2 v0.1.0 ## explicit; go 1.15 github.com/segmentio/kafka-go/sasl/aws_msk_iam_v2 +# github.com/sergi/go-diff v1.2.0 +## explicit; go 1.12 +github.com/sergi/go-diff/diffmatchpatch # github.com/shopspring/decimal v1.3.1 ## explicit; go 1.13 github.com/shopspring/decimal @@ -1335,7 +1340,7 @@ github.com/stretchr/objx github.com/stretchr/testify/assert github.com/stretchr/testify/mock github.com/stretchr/testify/require -# github.com/tidwall/gjson v1.17.1 +# github.com/tidwall/gjson 
v1.17.3 ## explicit; go 1.12 github.com/tidwall/gjson # github.com/tidwall/match v1.1.1 @@ -1380,10 +1385,10 @@ github.com/xhit/go-str2duration/v2 # github.com/xlab/treeprint v1.2.0 ## explicit; go 1.13 github.com/xlab/treeprint -# github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a -## explicit; go 1.12 +# github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 +## explicit; go 1.17 github.com/youmark/pkcs8 -# go.etcd.io/etcd/api/v3 v3.5.13 +# go.etcd.io/etcd/api/v3 v3.5.15 ## explicit; go 1.21 go.etcd.io/etcd/api/v3/authpb go.etcd.io/etcd/api/v3/etcdserverpb @@ -1391,7 +1396,7 @@ go.etcd.io/etcd/api/v3/membershippb go.etcd.io/etcd/api/v3/mvccpb go.etcd.io/etcd/api/v3/v3rpc/rpctypes go.etcd.io/etcd/api/v3/version -# go.etcd.io/etcd/client/pkg/v3 v3.5.13 +# go.etcd.io/etcd/client/pkg/v3 v3.5.15 ## explicit; go 1.21 go.etcd.io/etcd/client/pkg/v3/fileutil go.etcd.io/etcd/client/pkg/v3/logutil @@ -1399,13 +1404,13 @@ go.etcd.io/etcd/client/pkg/v3/systemd go.etcd.io/etcd/client/pkg/v3/tlsutil go.etcd.io/etcd/client/pkg/v3/transport go.etcd.io/etcd/client/pkg/v3/types -# go.etcd.io/etcd/client/v3 v3.5.13 +# go.etcd.io/etcd/client/v3 v3.5.15 ## explicit; go 1.21 go.etcd.io/etcd/client/v3 go.etcd.io/etcd/client/v3/credentials go.etcd.io/etcd/client/v3/internal/endpoint go.etcd.io/etcd/client/v3/internal/resolver -# go.mongodb.org/mongo-driver v1.15.0 +# go.mongodb.org/mongo-driver v1.16.0 ## explicit; go 1.18 go.mongodb.org/mongo-driver/bson go.mongodb.org/mongo-driver/bson/bsoncodec @@ -1481,7 +1486,7 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/inte ## explicit; go 1.20 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil -# go.opentelemetry.io/otel v1.26.0 +# go.opentelemetry.io/otel v1.28.0 ## explicit; go 1.21 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute @@ -1496,16 +1501,16 @@ go.opentelemetry.io/otel/semconv/internal go.opentelemetry.io/otel/semconv/v1.12.0 go.opentelemetry.io/otel/semconv/v1.17.0 go.opentelemetry.io/otel/semconv/v1.20.0 -go.opentelemetry.io/otel/semconv/v1.24.0 -# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.26.0 => go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.24.0 -## explicit; go 1.20 +go.opentelemetry.io/otel/semconv/v1.26.0 +# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0 +## explicit; go 1.21 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform -# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.26.0 +# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.28.0 ## explicit; go 1.21 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal @@ -1524,20 +1529,20 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry -# go.opentelemetry.io/otel/metric v1.26.0 +# go.opentelemetry.io/otel/metric v1.28.0 ## explicit; go 1.21 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/sdk v1.26.0 +# go.opentelemetry.io/otel/sdk v1.28.0 ## explicit; go 1.21 go.opentelemetry.io/otel/sdk go.opentelemetry.io/otel/sdk/instrumentation -go.opentelemetry.io/otel/sdk/internal go.opentelemetry.io/otel/sdk/internal/env +go.opentelemetry.io/otel/sdk/internal/x go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace -# go.opentelemetry.io/otel/sdk/metric v1.26.0 +# go.opentelemetry.io/otel/sdk/metric v1.28.0 ## explicit; go 1.21 go.opentelemetry.io/otel/sdk/metric go.opentelemetry.io/otel/sdk/metric/internal @@ -1545,12 +1550,12 @@ go.opentelemetry.io/otel/sdk/metric/internal/aggregate go.opentelemetry.io/otel/sdk/metric/internal/exemplar go.opentelemetry.io/otel/sdk/metric/internal/x go.opentelemetry.io/otel/sdk/metric/metricdata -# go.opentelemetry.io/otel/trace v1.26.0 +# go.opentelemetry.io/otel/trace v1.28.0 ## explicit; go 1.21 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded go.opentelemetry.io/otel/trace/noop -# go.opentelemetry.io/proto/otlp v1.2.0 +# go.opentelemetry.io/proto/otlp v1.3.1 ## explicit; go 1.17 go.opentelemetry.io/proto/otlp/collector/metrics/v1 go.opentelemetry.io/proto/otlp/collector/trace/v1 @@ -1595,8 +1600,8 @@ go.uber.org/zap/internal/pool go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc -# golang.org/x/crypto v0.24.0 => golang.org/x/crypto v0.22.0 -## explicit; go 1.18 +# golang.org/x/crypto v0.25.0 => golang.org/x/crypto v0.25.0 +## explicit; go 1.20 golang.org/x/crypto/argon2 golang.org/x/crypto/blake2b golang.org/x/crypto/cast5 @@ -1616,18 +1621,18 @@ golang.org/x/crypto/pkcs12/internal/rc2 golang.org/x/crypto/salsa20/salsa golang.org/x/crypto/scrypt golang.org/x/crypto/sha3 -# golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f +# golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 ## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/maps golang.org/x/exp/slices -# golang.org/x/mod v0.17.0 +# golang.org/x/mod v0.19.0 ## explicit; go 1.18 golang.org/x/mod/internal/lazyregexp golang.org/x/mod/modfile golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.26.0 => golang.org/x/net v0.24.0 +# golang.org/x/net v0.27.0 => golang.org/x/net v0.27.0 ## explicit; go 1.18 golang.org/x/net/context golang.org/x/net/html @@ -1644,7 +1649,7 @@ golang.org/x/net/proxy golang.org/x/net/publicsuffix golang.org/x/net/trace golang.org/x/net/websocket -# golang.org/x/oauth2 v0.20.0 +# golang.org/x/oauth2 v0.22.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/authhandler @@ -1657,22 +1662,22 @@ golang.org/x/oauth2/google/internal/stsexchange golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sync v0.7.0 +# golang.org/x/sync v0.8.0 ## explicit; go 1.18 golang.org/x/sync/errgroup golang.org/x/sync/semaphore golang.org/x/sync/singleflight -# golang.org/x/sys v0.21.0 +# golang.org/x/sys v0.22.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.19.0 +# golang.org/x/term v0.22.0 ## explicit; go 1.18 golang.org/x/term -# golang.org/x/text v0.16.0 => golang.org/x/text v0.14.0 +# 
golang.org/x/text v0.16.0 => golang.org/x/text v0.16.0 ## explicit; go 1.18 golang.org/x/text/cases golang.org/x/text/encoding @@ -1708,13 +1713,12 @@ golang.org/x/text/width # golang.org/x/time v0.5.0 ## explicit; go 1.18 golang.org/x/time/rate -# golang.org/x/tools v0.20.0 +# golang.org/x/tools v0.23.0 ## explicit; go 1.19 golang.org/x/tools/cover golang.org/x/tools/go/ast/astutil golang.org/x/tools/go/ast/inspector golang.org/x/tools/go/gcexportdata -golang.org/x/tools/go/internal/packagesdriver golang.org/x/tools/go/packages golang.org/x/tools/go/types/objectpath golang.org/x/tools/imports @@ -1723,7 +1727,6 @@ golang.org/x/tools/internal/event golang.org/x/tools/internal/event/core golang.org/x/tools/internal/event/keys golang.org/x/tools/internal/event/label -golang.org/x/tools/internal/event/tag golang.org/x/tools/internal/gcimporter golang.org/x/tools/internal/gocommand golang.org/x/tools/internal/gopathwalk @@ -1741,7 +1744,7 @@ golang.org/x/xerrors/internal # gomodules.xyz/jsonpatch/v2 v2.4.0 ## explicit; go 1.20 gomodules.xyz/jsonpatch/v2 -# google.golang.org/api v0.181.0 +# google.golang.org/api v0.190.0 ## explicit; go 1.20 google.golang.org/api/googleapi google.golang.org/api/googleapi/transport @@ -1759,13 +1762,14 @@ google.golang.org/api/transport google.golang.org/api/transport/grpc google.golang.org/api/transport/http google.golang.org/api/transport/http/internal/propagation -# google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda -## explicit; go 1.19 +# google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf +## explicit; go 1.20 +google.golang.org/genproto/googleapis/cloud/location google.golang.org/genproto/googleapis/type/calendarperiod google.golang.org/genproto/googleapis/type/date google.golang.org/genproto/googleapis/type/expr -# google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae -## explicit; go 1.19 +# google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f +## explicit; go 1.20 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/distribution @@ -1774,13 +1778,13 @@ google.golang.org/genproto/googleapis/api/httpbody google.golang.org/genproto/googleapis/api/label google.golang.org/genproto/googleapis/api/metric google.golang.org/genproto/googleapis/api/monitoredres -# google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8 -## explicit; go 1.19 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf +## explicit; go 1.20 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.64.0 => google.golang.org/grpc v1.63.2 -## explicit; go 1.19 +# google.golang.org/grpc v1.65.0 => google.golang.org/grpc v1.65.0 +## explicit; go 1.21 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff @@ -1789,6 +1793,7 @@ google.golang.org/grpc/balancer/base google.golang.org/grpc/balancer/grpclb google.golang.org/grpc/balancer/grpclb/grpc_lb_v1 google.golang.org/grpc/balancer/grpclb/state +google.golang.org/grpc/balancer/pickfirst google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/binarylog/grpc_binarylog_v1 google.golang.org/grpc/channelz @@ -1821,7 +1826,6 @@ google.golang.org/grpc/internal/credentials google.golang.org/grpc/internal/envconfig google.golang.org/grpc/internal/googlecloud 
google.golang.org/grpc/internal/grpclog -google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/grpcutil google.golang.org/grpc/internal/idle @@ -1841,6 +1845,10 @@ google.golang.org/grpc/internal/xds google.golang.org/grpc/keepalive google.golang.org/grpc/metadata google.golang.org/grpc/peer +google.golang.org/grpc/reflection +google.golang.org/grpc/reflection/grpc_reflection_v1 +google.golang.org/grpc/reflection/grpc_reflection_v1alpha +google.golang.org/grpc/reflection/internal google.golang.org/grpc/resolver google.golang.org/grpc/resolver/dns google.golang.org/grpc/resolver/manual @@ -1848,11 +1856,11 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 -## explicit; go 1.17 +# google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 +## explicit; go 1.21 google.golang.org/grpc/cmd/protoc-gen-go-grpc -# google.golang.org/protobuf v1.34.1 -## explicit; go 1.17 +# google.golang.org/protobuf v1.34.2 +## explicit; go 1.20 google.golang.org/protobuf/cmd/protoc-gen-go google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo google.golang.org/protobuf/compiler/protogen @@ -1916,7 +1924,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 => gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.29.4 => k8s.io/api v0.29.4 +# k8s.io/api v0.30.0 => k8s.io/api v0.29.7 ## explicit; go 1.21 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -1972,8 +1980,8 @@ k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver v0.29.4 -## explicit; go 1.21 +# k8s.io/apiextensions-apiserver v0.30.0 +## explicit; go 1.22.0 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1 @@ -1983,7 +1991,7 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 -# k8s.io/apimachinery v0.29.4 => k8s.io/apimachinery v0.29.4 +# k8s.io/apimachinery v0.31.0-beta.0 => k8s.io/apimachinery v0.29.7 ## explicit; go 1.21 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -2046,7 +2054,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.29.4 => k8s.io/apiserver v0.29.4 +# k8s.io/apiserver v0.30.0 => k8s.io/apiserver v0.29.7 ## explicit; go 1.21 k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/cel @@ -2194,7 +2202,7 @@ k8s.io/apiserver/plugin/pkg/audit/truncate k8s.io/apiserver/plugin/pkg/audit/webhook k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook -# k8s.io/client-go v0.29.4 => k8s.io/client-go v0.29.4 +# k8s.io/client-go v0.30.0 => k8s.io/client-go v0.29.7 ## explicit; go 1.21 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 @@ -2467,7 +2475,7 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry 
k8s.io/client-go/util/workqueue -# k8s.io/code-generator v0.29.4 => k8s.io/code-generator v0.29.4 +# k8s.io/code-generator v0.30.0 => k8s.io/code-generator v0.29.7 ## explicit; go 1.21 k8s.io/code-generator k8s.io/code-generator/cmd/applyconfiguration-gen @@ -2505,7 +2513,7 @@ k8s.io/code-generator/cmd/set-gen k8s.io/code-generator/pkg/namer k8s.io/code-generator/pkg/util k8s.io/code-generator/third_party/forked/golang/reflect -# k8s.io/component-base v0.29.4 => k8s.io/component-base v0.29.4 +# k8s.io/component-base v0.30.0 => k8s.io/component-base v0.29.7 ## explicit; go 1.21 k8s.io/component-base/cli/flag k8s.io/component-base/config @@ -2548,13 +2556,13 @@ k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity k8s.io/klog/v2/internal/sloghandler k8s.io/klog/v2/klogr -# k8s.io/kms v0.29.4 -## explicit; go 1.21 +# k8s.io/kms v0.30.0 +## explicit; go 1.22.0 k8s.io/kms/apis/v1beta1 k8s.io/kms/apis/v2 k8s.io/kms/pkg/service k8s.io/kms/pkg/util -# k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 => k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 +# k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 => k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 ## explicit; go 1.19 k8s.io/kube-openapi/cmd/openapi-gen k8s.io/kube-openapi/cmd/openapi-gen/args @@ -2580,7 +2588,7 @@ k8s.io/kube-openapi/pkg/validation/errors k8s.io/kube-openapi/pkg/validation/spec k8s.io/kube-openapi/pkg/validation/strfmt k8s.io/kube-openapi/pkg/validation/strfmt/bson -# k8s.io/metrics v0.29.4 => k8s.io/metrics v0.29.4 +# k8s.io/metrics v0.29.4 => k8s.io/metrics v0.29.7 ## explicit; go 1.21 k8s.io/metrics/pkg/apis/custom_metrics k8s.io/metrics/pkg/apis/custom_metrics/install @@ -2589,7 +2597,7 @@ k8s.io/metrics/pkg/apis/custom_metrics/v1beta2 k8s.io/metrics/pkg/apis/external_metrics k8s.io/metrics/pkg/apis/external_metrics/install k8s.io/metrics/pkg/apis/external_metrics/v1beta1 -# k8s.io/utils v0.0.0-20240423183400-0849a56e8f22 +# k8s.io/utils v0.0.0-20240102154912-e7106e64919e ## explicit; go 1.18 k8s.io/utils/buffer k8s.io/utils/clock @@ -2604,8 +2612,8 @@ k8s.io/utils/pointer k8s.io/utils/ptr k8s.io/utils/strings/slices k8s.io/utils/trace -# knative.dev/pkg v0.0.0-20240423132823-3c6badc82748 -## explicit; go 1.21 +# knative.dev/pkg v0.0.0-20240805063731-c88d5dad9653 +## explicit; go 1.22 knative.dev/pkg/apis knative.dev/pkg/apis/duck knative.dev/pkg/apis/duck/ducktypes @@ -2613,6 +2621,7 @@ knative.dev/pkg/apis/duck/v1 knative.dev/pkg/kmap knative.dev/pkg/kmeta knative.dev/pkg/kmp +knative.dev/pkg/ptr knative.dev/pkg/tracker # sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 ## explicit; go 1.20 @@ -2620,7 +2629,7 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client -# sigs.k8s.io/controller-runtime v0.17.3 +# sigs.k8s.io/controller-runtime v0.17.5 ## explicit; go 1.21 sigs.k8s.io/controller-runtime sigs.k8s.io/controller-runtime/pkg/builder @@ -2674,16 +2683,16 @@ sigs.k8s.io/controller-runtime/pkg/webhook sigs.k8s.io/controller-runtime/pkg/webhook/admission sigs.k8s.io/controller-runtime/pkg/webhook/conversion sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics -# sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240201105228-4000e996a202 -## explicit; go 1.20 +# sigs.k8s.io/controller-runtime/tools/setup-envtest 
v0.0.0-20240804232438-89b5deec030c +## explicit; go 1.22.0 sigs.k8s.io/controller-runtime/tools/setup-envtest sigs.k8s.io/controller-runtime/tools/setup-envtest/env sigs.k8s.io/controller-runtime/tools/setup-envtest/remote sigs.k8s.io/controller-runtime/tools/setup-envtest/store sigs.k8s.io/controller-runtime/tools/setup-envtest/versions sigs.k8s.io/controller-runtime/tools/setup-envtest/workflows -# sigs.k8s.io/controller-tools v0.14.0 -## explicit; go 1.20 +# sigs.k8s.io/controller-tools v0.15.0 +## explicit; go 1.22.0 sigs.k8s.io/controller-tools/cmd/controller-gen sigs.k8s.io/controller-tools/pkg/crd sigs.k8s.io/controller-tools/pkg/crd/markers @@ -2698,7 +2707,7 @@ sigs.k8s.io/controller-tools/pkg/schemapatcher sigs.k8s.io/controller-tools/pkg/schemapatcher/internal/yaml sigs.k8s.io/controller-tools/pkg/version sigs.k8s.io/controller-tools/pkg/webhook -# sigs.k8s.io/custom-metrics-apiserver v1.28.1-0.20240425173932-1a855fe8c789 +# sigs.k8s.io/custom-metrics-apiserver v1.29.0 ## explicit; go 1.21 sigs.k8s.io/custom-metrics-apiserver/pkg/apiserver sigs.k8s.io/custom-metrics-apiserver/pkg/apiserver/endpoints/handlers @@ -2719,7 +2728,7 @@ sigs.k8s.io/custom-metrics-apiserver/pkg/registry/external_metrics ## explicit; go 1.18 sigs.k8s.io/json sigs.k8s.io/json/internal/golang/encoding/json -# sigs.k8s.io/kustomize/api v0.17.1 +# sigs.k8s.io/kustomize/api v0.17.3 ## explicit; go 1.21 sigs.k8s.io/kustomize/api/filters/annotations sigs.k8s.io/kustomize/api/filters/fieldspec @@ -2769,7 +2778,7 @@ sigs.k8s.io/kustomize/api/provider sigs.k8s.io/kustomize/api/resmap sigs.k8s.io/kustomize/api/resource sigs.k8s.io/kustomize/api/types -# sigs.k8s.io/kustomize/cmd/config v0.14.0 +# sigs.k8s.io/kustomize/cmd/config v0.14.2 ## explicit; go 1.21 sigs.k8s.io/kustomize/cmd/config/completion sigs.k8s.io/kustomize/cmd/config/configcobra @@ -2780,7 +2789,7 @@ sigs.k8s.io/kustomize/cmd/config/internal/generateddocs/api sigs.k8s.io/kustomize/cmd/config/internal/generateddocs/commands sigs.k8s.io/kustomize/cmd/config/internal/generateddocs/tutorials sigs.k8s.io/kustomize/cmd/config/runner -# sigs.k8s.io/kustomize/kustomize/v5 v5.4.1 +# sigs.k8s.io/kustomize/kustomize/v5 v5.4.3 ## explicit; go 1.21 sigs.k8s.io/kustomize/kustomize/v5 sigs.k8s.io/kustomize/kustomize/v5/commands @@ -2799,9 +2808,10 @@ sigs.k8s.io/kustomize/kustomize/v5/commands/openapi sigs.k8s.io/kustomize/kustomize/v5/commands/openapi/fetch sigs.k8s.io/kustomize/kustomize/v5/commands/openapi/info sigs.k8s.io/kustomize/kustomize/v5/commands/version -# sigs.k8s.io/kustomize/kyaml v0.17.0 +# sigs.k8s.io/kustomize/kyaml v0.17.2 ## explicit; go 1.21 sigs.k8s.io/kustomize/kyaml/comments +sigs.k8s.io/kustomize/kyaml/copyutil sigs.k8s.io/kustomize/kyaml/errors sigs.k8s.io/kustomize/kyaml/ext sigs.k8s.io/kustomize/kyaml/fieldmeta @@ -2850,23 +2860,22 @@ sigs.k8s.io/yaml/goyaml.v2 sigs.k8s.io/yaml/goyaml.v3 # github.com/open-policy-agent/cert-controller => github.com/jorturfer/cert-controller v0.0.0-20240427003941-363ba56751d7 # github.com/google/cel-go => github.com/google/cel-go v0.17.8 -# github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.16.0 -# github.com/prometheus/client_model => github.com/prometheus/client_model v0.4.0 -# github.com/prometheus/common => github.com/prometheus/common v0.44.0 -# k8s.io/api => k8s.io/api v0.29.4 -# k8s.io/apimachinery => k8s.io/apimachinery v0.29.4 -# k8s.io/apiserver => k8s.io/apiserver v0.29.4 -# k8s.io/client-go => k8s.io/client-go v0.29.4 -# k8s.io/code-generator => k8s.io/code-generator 
v0.29.4 -# k8s.io/component-base => k8s.io/component-base v0.29.4 +# github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.18.0 +# github.com/prometheus/client_model => github.com/prometheus/client_model v0.5.0 +# github.com/prometheus/common => github.com/prometheus/common v0.45.0 +# k8s.io/api => k8s.io/api v0.29.7 +# k8s.io/apimachinery => k8s.io/apimachinery v0.29.7 +# k8s.io/apiserver => k8s.io/apiserver v0.29.7 +# k8s.io/client-go => k8s.io/client-go v0.29.7 +# k8s.io/code-generator => k8s.io/code-generator v0.29.7 +# k8s.io/component-base => k8s.io/component-base v0.29.7 # k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 -# k8s.io/metrics => k8s.io/metrics v0.29.4 +# k8s.io/metrics => k8s.io/metrics v0.29.7 # github.com/chzyer/logex => github.com/chzyer/logex v1.2.1 # github.com/dgrijalva/jwt-go => github.com/golang-jwt/jwt/v4 v4.4.0 # github.com/golang-jwt/jwt/v4 => github.com/golang-jwt/jwt/v4 v4.5.0 -# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.24.0 -# golang.org/x/crypto => golang.org/x/crypto v0.22.0 -# golang.org/x/net => golang.org/x/net v0.24.0 -# golang.org/x/text => golang.org/x/text v0.14.0 -# google.golang.org/grpc => google.golang.org/grpc v1.63.2 +# golang.org/x/crypto => golang.org/x/crypto v0.25.0 +# golang.org/x/net => golang.org/x/net v0.27.0 +# golang.org/x/text => golang.org/x/text v0.16.0 +# google.golang.org/grpc => google.golang.org/grpc v1.65.0 # gopkg.in/yaml.v3 => gopkg.in/yaml.v3 v3.0.1 diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go index e7f2945f1d9..73ad68fe43a 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go @@ -419,19 +419,6 @@ func defaultOpts(config *rest.Config, opts Options) (Options, error) { } } - for namespace, cfg := range opts.DefaultNamespaces { - cfg = defaultConfig(cfg, optionDefaultsToConfig(&opts)) - if namespace == metav1.NamespaceAll { - cfg.FieldSelector = fields.AndSelectors( - appendIfNotNil( - namespaceAllSelector(maps.Keys(opts.DefaultNamespaces)), - cfg.FieldSelector, - )..., - ) - } - opts.DefaultNamespaces[namespace] = cfg - } - for obj, byObject := range opts.ByObject { isNamespaced, err := apiutil.IsObjectNamespaced(obj, opts.Scheme, opts.Mapper) if err != nil { @@ -485,6 +472,22 @@ func defaultOpts(config *rest.Config, opts Options) (Options, error) { opts.ByObject[obj] = byObject } + // Default namespaces after byObject has been defaulted, otherwise a namespace without selectors + // will get the `Default` selectors, then get copied to byObject and then not get defaulted from + // byObject, as it already has selectors. 
+ for namespace, cfg := range opts.DefaultNamespaces { + cfg = defaultConfig(cfg, optionDefaultsToConfig(&opts)) + if namespace == metav1.NamespaceAll { + cfg.FieldSelector = fields.AndSelectors( + appendIfNotNil( + namespaceAllSelector(maps.Keys(opts.DefaultNamespaces)), + cfg.FieldSelector, + )..., + ) + } + opts.DefaultNamespaces[namespace] = cfg + } + // Default the resync period to 10 hours if unset if opts.SyncPeriod == nil { opts.SyncPeriod = &defaultSyncPeriod diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/fieldowner.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fieldowner.go new file mode 100644 index 00000000000..2f2f892ef3f --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fieldowner.go @@ -0,0 +1,106 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// WithFieldOwner wraps a Client and adds the fieldOwner as the field +// manager to all write requests from this client. If additional [FieldOwner] +// options are specified on methods of this client, the value specified here +// will be overridden. +func WithFieldOwner(c Client, fieldOwner string) Client { + return &clientWithFieldManager{ + manager: fieldOwner, + c: c, + Reader: c, + } +} + +type clientWithFieldManager struct { + manager string + c Client + Reader +} + +func (f *clientWithFieldManager) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + return f.c.Create(ctx, obj, append([]CreateOption{FieldOwner(f.manager)}, opts...)...) +} + +func (f *clientWithFieldManager) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + return f.c.Update(ctx, obj, append([]UpdateOption{FieldOwner(f.manager)}, opts...)...) +} + +func (f *clientWithFieldManager) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + return f.c.Patch(ctx, obj, patch, append([]PatchOption{FieldOwner(f.manager)}, opts...)...) +} + +func (f *clientWithFieldManager) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + return f.c.Delete(ctx, obj, opts...) +} + +func (f *clientWithFieldManager) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + return f.c.DeleteAllOf(ctx, obj, opts...) 
+} + +func (f *clientWithFieldManager) Scheme() *runtime.Scheme { return f.c.Scheme() } +func (f *clientWithFieldManager) RESTMapper() meta.RESTMapper { return f.c.RESTMapper() } +func (f *clientWithFieldManager) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return f.c.GroupVersionKindFor(obj) +} +func (f *clientWithFieldManager) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return f.c.IsObjectNamespaced(obj) +} + +func (f *clientWithFieldManager) Status() StatusWriter { + return &subresourceClientWithFieldOwner{ + owner: f.manager, + subresourceWriter: f.c.Status(), + } +} + +func (f *clientWithFieldManager) SubResource(subresource string) SubResourceClient { + c := f.c.SubResource(subresource) + return &subresourceClientWithFieldOwner{ + owner: f.manager, + subresourceWriter: c, + SubResourceReader: c, + } +} + +type subresourceClientWithFieldOwner struct { + owner string + subresourceWriter SubResourceWriter + SubResourceReader +} + +func (f *subresourceClientWithFieldOwner) Create(ctx context.Context, obj Object, subresource Object, opts ...SubResourceCreateOption) error { + return f.subresourceWriter.Create(ctx, obj, subresource, append([]SubResourceCreateOption{FieldOwner(f.owner)}, opts...)...) +} + +func (f *subresourceClientWithFieldOwner) Update(ctx context.Context, obj Object, opts ...SubResourceUpdateOption) error { + return f.subresourceWriter.Update(ctx, obj, append([]SubResourceUpdateOption{FieldOwner(f.owner)}, opts...)...) +} + +func (f *subresourceClientWithFieldOwner) Patch(ctx context.Context, obj Object, patch Patch, opts ...SubResourcePatchOption) error { + return f.subresourceWriter.Patch(ctx, obj, patch, append([]SubResourcePatchOption{FieldOwner(f.owner)}, opts...)...) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go index fdb9d982d93..2b03263de80 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go @@ -179,6 +179,24 @@ func (cm *controllerManager) add(r Runnable) error { return cm.runnables.Add(r) } +// AddMetricsServerExtraHandler adds extra handler served on path to the http server that serves metrics. +func (cm *controllerManager) AddMetricsServerExtraHandler(path string, handler http.Handler) error { + cm.Lock() + defer cm.Unlock() + if cm.started { + return fmt.Errorf("unable to add new metrics handler because metrics endpoint has already been created") + } + if cm.metricsServer == nil { + cm.GetLogger().Info("warn: metrics server is currently disabled, registering extra handler %q will be ignored", path) + return nil + } + if err := cm.metricsServer.AddExtraHandler(path, handler); err != nil { + return err + } + cm.logger.V(2).Info("Registering metrics http server extra handler", "path", path) + return nil +} + // AddHealthzCheck allows you to add Healthz checker. func (cm *controllerManager) AddHealthzCheck(name string, check healthz.Checker) error { cm.Lock() diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go index 25c3c7375b2..0b7a865004a 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go @@ -67,6 +67,15 @@ type Manager interface { // election was configured. 
Elected() <-chan struct{} + // AddMetricsServerExtraHandler adds an extra handler served on path to the http server that serves metrics. + // Might be useful to register some diagnostic endpoints e.g. pprof. + // + // Note that these endpoints are meant to be sensitive and shouldn't be exposed publicly. + // + // If the simple path -> handler mapping offered here is not enough, + // a new http server/listener should be added as Runnable to the manager via Add method. + AddMetricsServerExtraHandler(path string, handler http.Handler) error + // AddHealthzCheck allows you to add Healthz checker AddHealthzCheck(name string, check healthz.Checker) error diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/server/server.go b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/server/server.go index e10c5c2103f..40eb9db8cc5 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/server/server.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/server/server.go @@ -46,6 +46,9 @@ var DefaultBindAddress = ":8080" // Server is a server that serves metrics. type Server interface { + // AddExtraHandler adds extra handler served on path to the http server that serves metrics. + AddExtraHandler(path string, handler http.Handler) error + // NeedLeaderElection implements the LeaderElectionRunnable interface, which indicates // the metrics server doesn't need leader election. NeedLeaderElection() bool @@ -179,6 +182,23 @@ func (*defaultServer) NeedLeaderElection() bool { return false } +// AddExtraHandler adds extra handler served on path to the http server that serves metrics. +func (s *defaultServer) AddExtraHandler(path string, handler http.Handler) error { + s.mu.Lock() + defer s.mu.Unlock() + if s.options.ExtraHandlers == nil { + s.options.ExtraHandlers = make(map[string]http.Handler) + } + if path == defaultMetricsEndpoint { + return fmt.Errorf("overriding builtin %s endpoint is not allowed", defaultMetricsEndpoint) + } + if _, found := s.options.ExtraHandlers[path]; found { + return fmt.Errorf("can't register extra handler by duplicate path %q on metrics http server", path) + } + s.options.ExtraHandlers[path] = handler + return nil +} + // Start runs the server. // It will install the metrics related resources depend on the server configuration. func (s *defaultServer) Start(ctx context.Context) error { diff --git a/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/README.md b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/README.md index 1bdeebbc559..0482dd31622 100644 --- a/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/README.md +++ b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/README.md @@ -4,13 +4,19 @@ This is a small tool that manages binaries for envtest. It can be used to download new binaries, list currently installed and available ones, and clean up versions. 
-To use it, just go-install it on 1.19+ (it's a separate, self-contained +To use it, just go-install it with Golang 1.22+ (it's a separate, self-contained module): ```shell go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest ``` +If you are using Golang 1.20 or 1.21, use the `release-0.17` branch instead: + +```shell +go install sigs.k8s.io/controller-runtime/tools/setup-envtest@release-0.17 +``` + For full documentation, run it with the `--help` flag, but here are some examples: @@ -40,6 +46,17 @@ setup-envtest use -i --use-env # sideload a pre-downloaded tarball as Kubernetes 1.16.2 into our store setup-envtest sideload 1.16.2 < downloaded-envtest.tar.gz + +# Per default envtest binaries are downloaded from: +# https://raw.githubusercontent.com/kubernetes-sigs/controller-tools/master/envtest-releases.yaml +# To download from a custom index use the following: +setup-envtest use --index https://custom.com/envtest-releases.yaml + +# To download from the kubebuilder-tools GCS bucket: (default behavior before v0.18) +# Note: This is a Google-owned bucket and it might be shutdown at any time +# see: https://github.com/kubernetes/k8s.io/issues/2647#event-12439345373 +# Note: This flag will also be removed soon. +setup-envtest use --use-deprecated-gcs ``` ## Where does it put all those binaries? @@ -51,16 +68,16 @@ On Linux, this is `$XDG_DATA_HOME`; on Windows, `%LocalAppData`; and on OSX, `~/Library/Application Support`. There's an overall folder that holds all files, and inside that is -a folder for each version/platform pair. The exact directory structure is -not guarnateed, except that the leaf directory will contain the names -expected by envtest. You should always use `setup-envtest fetch` or +a folder for each version/platform pair. The exact directory structure is +not guaranteed, except that the leaf directory will contain the names +expected by envtest. You should always use `setup-envtest fetch` or `setup-envtest switch` (generally with the `-p path` or `-p env` flags) to get the directory that you should use. ## Why do I have to do that `source <(blah blah blah)` thing This is a normal binary, not a shell script, so we can't set the parent -process's environment variables. If you use this by hand a lot and want +process's environment variables. If you use this by hand a lot and want to save the typing, you could put something like the following in your `~/.zshrc` (or similar for bash/fish/whatever, modified to those): @@ -79,7 +96,7 @@ setup-envtest() { There are a few options. First, you'll probably want to set the `-i/--installed` flag. If you want -to avoid forgetting to set this flag, set the `ENVTEST_INSTALLED_ONLY` +to avoid forgetting to set this flag, set the `ENVTEST_INSTALLED_ONLY` env variable, which will switch that flag on by default. Then, you have a few options for managing your binaries: @@ -98,13 +115,18 @@ Then, you have a few options for managing your binaries: `--use-env` on by default. - If you want to use this tool, but download your gziped tarballs - separately, you can use the `sideload` command. You'll need to use the + separately, you can use the `sideload` command. You'll need to use the `-k/--version` flag to indicate which version you're sideloading. After that, it'll be as if you'd installed the binaries with `use`. -- If you want to talk to some internal source, you can use the - `--remote-bucket` and `--remote-server` options. 
The former sets which +- If you want to talk to some internal source via HTTP, you can simply set `--index` + The index must contain references to envtest binary archives in the same format as: + https://raw.githubusercontent.com/kubernetes-sigs/controller-tools/master/envtest-releases.yaml + +- If you want to talk to some internal source in a GCS "style", you can use the + `--remote-bucket` and `--remote-server` options together with `--use-deprecated-gcs`. + Note: This is deprecated and will be removed soon. The former sets which GCS bucket to download from, and the latter sets the host to talk to as if it were a GCS endpoint. Theoretically, you could use the latter version to run an internal "mirror" -- the tool expects @@ -114,7 +136,7 @@ Then, you have a few options for managing your binaries: ```json {"items": [ {"name": "kubebuilder-tools-X.Y.Z-os-arch.tar.gz", "md5Hash": ""}, - {"name": "kubebuilder-tools-X.Y.Z-os-arch.tar.gz", "md5Hash": ""}, + {"name": "kubebuilder-tools-X.Y.Z-os-arch.tar.gz", "md5Hash": ""} ]} ``` diff --git a/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/env/env.go b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/env/env.go index e12a107352e..24857916d75 100644 --- a/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/env/env.go +++ b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/env/env.go @@ -26,24 +26,28 @@ import ( // envtest binaries. // // In general, the methods will use the Exit{,Cause} functions from this package -// to indicate errors. Catch them with a `defer HandleExitWithCode()`. +// to indicate errors. Catch them with a `defer HandleExitWithCode()`. type Env struct { // the following *must* be set on input // Platform is our current platform Platform versions.PlatformItem - // VerifiySum indicates whether or not we should run checksums. + // VerifySum indicates whether we should run checksums. VerifySum bool - // NoDownload forces us to not contact GCS, looking only - // at local files instead. + // NoDownload forces us to not contact remote services, + // looking only at local files instead. NoDownload bool // ForceDownload forces us to ignore local files and always - // contact GCS & re-download. + // contact remote services & re-download. ForceDownload bool - // Client is our remote client for contacting GCS. - Client *remote.Client + // UseDeprecatedGCS signals if the GCS client is used. + // Note: This will be removed together with remote.GCSClient. + UseDeprecatedGCS bool + + // Client is our remote client for contacting remote services. + Client remote.Client // Log allows us to log. Log logr.Logger @@ -133,7 +137,7 @@ func (e *Env) ListVersions(ctx context.Context) { } // LatestVersion returns the latest version matching our version selector and -// platform from the remote server, with the correspoding checksum for later +// platform from the remote server, with the corresponding checksum for later // use as well. func (e *Env) LatestVersion(ctx context.Context) (versions.Concrete, versions.PlatformItem) { vers, err := e.Client.ListVersions(ctx) @@ -193,7 +197,7 @@ func (e *Env) ExistsAndValid() bool { // // If necessary, it will enumerate on-disk and remote versions to accomplish // this, finding a version that matches our version selector and platform. -// It will always yield a concrete version, it *may* yield a concrete platorm +// It will always yield a concrete version, it *may* yield a concrete platform // as well. 
func (e *Env) EnsureVersionIsSet(ctx context.Context) { if e.Version.AsConcrete() != nil { @@ -247,13 +251,13 @@ func (e *Env) EnsureVersionIsSet(ctx context.Context) { // if we're not forcing a download, and we have a newer local version, just use that if !e.ForceDownload && localVer != nil && localVer.NewerThan(serverVer) { - e.Platform.Platform = localPlat // update our data with md5 + e.Platform.Platform = localPlat // update our data with hash e.Version.MakeConcrete(*localVer) return } // otherwise, use the new version from the server - e.Platform = platform // update our data with md5 + e.Platform = platform // update our data with hash e.Version.MakeConcrete(serverVer) } @@ -266,13 +270,13 @@ func (e *Env) Fetch(ctx context.Context) { log := e.Log.WithName("fetch") // if we didn't just fetch it, grab the sum to verify - if e.VerifySum && e.Platform.MD5 == "" { + if e.VerifySum && e.Platform.Hash == nil { if err := e.Client.FetchSum(ctx, *e.Version.AsConcrete(), &e.Platform); err != nil { - ExitCause(2, err, "unable to fetch checksum for requested version") + ExitCause(2, err, "unable to fetch hash for requested version") } } if !e.VerifySum { - e.Platform.MD5 = "" // skip verification + e.Platform.Hash = nil // skip verification } var packedPath string @@ -287,7 +291,7 @@ func (e *Env) Fetch(ctx context.Context) { } }) - archiveOut, err := e.FS.TempFile("", "*-"+e.Platform.ArchiveName(*e.Version.AsConcrete())) + archiveOut, err := e.FS.TempFile("", "*-"+e.Platform.ArchiveName(e.UseDeprecatedGCS, *e.Version.AsConcrete())) if err != nil { ExitCause(2, err, "unable to open file to write downloaded archive to") } @@ -365,8 +369,8 @@ func (e *Env) PrintInfo(printFmt PrintFormat) { case PrintOverview: fmt.Fprintf(e.Out, "Version: %s\n", e.Version) fmt.Fprintf(e.Out, "OS/Arch: %s\n", e.Platform) - if e.Platform.MD5 != "" { - fmt.Fprintf(e.Out, "md5: %s\n", e.Platform.MD5) + if e.Platform.Hash != nil { + fmt.Fprintf(e.Out, "%s: %s\n", e.Platform.Hash.Type, e.Platform.Hash.Value) } fmt.Fprintf(e.Out, "Path: %s\n", path) case PrintPath: @@ -409,7 +413,7 @@ func (e *Env) Sideload(ctx context.Context, input io.Reader) { } var ( - // expectedExectuables are the executables that are checked in PathMatches + // expectedExecutables are the executables that are checked in PathMatches // for non-store paths. expectedExecutables = []string{ "kube-apiserver", @@ -458,7 +462,7 @@ func (e *Env) PathMatches(value string) bool { } // versionFromPathName checks if the given path's last component looks like one -// of our versions, and, if so, what version it represents. If succesfull, +// of our versions, and, if so, what version it represents. If successful, // it'll set version and platform, and return true. Otherwise it returns // false. func (e *Env) versionFromPathName(value string) bool { diff --git a/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/main.go b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/main.go index 8dca7741572..7e2761a4f66 100644 --- a/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/main.go +++ b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/main.go @@ -49,10 +49,17 @@ var ( binDir = flag.String("bin-dir", "", "directory to store binary assets (default: $OS_SPECIFIC_DATA_DIR/envtest-binaries)") - remoteBucket = flag.String("remote-bucket", "kubebuilder-tools", "remote GCS bucket to download from") + + useDeprecatedGCS = flag.Bool("use-deprecated-gcs", false, "use GCS to fetch envtest binaries. 
Note: This is deprecated and will be removed soon. For more details see: https://github.com/kubernetes-sigs/controller-runtime/pull/2811") + + // These flags are only used with --use-deprecated-gcs. + remoteBucket = flag.String("remote-bucket", "kubebuilder-tools", "remote GCS bucket to download from (only used with --use-deprecated-gcs)") remoteServer = flag.String("remote-server", "storage.googleapis.com", "remote server to query from. You can override this if you want to run "+ - "an internal storage server instead, or for testing.") + "an internal storage server instead, or for testing. (only used with --use-deprecated-gcs)") + + // This flag is only used if --use-deprecated-gcs is not set or false (default). + index = flag.String("index", remote.DefaultIndexURL, "index to discover envtest binaries (only used if --use-deprecated-gcs is not set, or set to false)") ) // TODO(directxman12): handle interrupts? @@ -81,16 +88,29 @@ func setupEnv(globalLog logr.Logger, version string) *envp.Env { } log.V(1).Info("using binaries directory", "dir", *binDir) - env := &envp.Env{ - Log: globalLog, - Client: &remote.Client{ + var client remote.Client + if useDeprecatedGCS != nil && *useDeprecatedGCS { + client = &remote.GCSClient{ //nolint:staticcheck // deprecation accepted for now Log: globalLog.WithName("storage-client"), Bucket: *remoteBucket, Server: *remoteServer, - }, - VerifySum: *verify, - ForceDownload: *force, - NoDownload: *installedOnly, + } + log.V(1).Info("using deprecated GCS client", "bucket", *remoteBucket, "server", *remoteServer) + } else { + client = &remote.HTTPClient{ + Log: globalLog.WithName("storage-client"), + IndexURL: *index, + } + log.V(1).Info("using HTTP client", "index", *index) + } + + env := &envp.Env{ + Log: globalLog, + UseDeprecatedGCS: useDeprecatedGCS != nil && *useDeprecatedGCS, + Client: client, + VerifySum: *verify, + ForceDownload: *force, + NoDownload: *installedOnly, Platform: versions.PlatformItem{ Platform: versions.Platform{ OS: *targetOS, diff --git a/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/remote/client.go b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/remote/client.go index be82532583a..24efd6daffe 100644 --- a/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/remote/client.go +++ b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/remote/client.go @@ -1,219 +1,20 @@ // SPDX-License-Identifier: Apache-2.0 -// Copyright 2021 The Kubernetes Authors +// Copyright 2024 The Kubernetes Authors package remote import ( "context" - "crypto/md5" //nolint:gosec - "encoding/base64" - "encoding/json" - "errors" - "fmt" "io" - "net/http" - "net/url" - "path" - "sort" - "github.com/go-logr/logr" "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" ) -// objectList is the parts we need of the GCS "list-objects-in-bucket" endpoint. -type objectList struct { - Items []bucketObject `json:"items"` - NextPageToken string `json:"nextPageToken"` -} - -// bucketObject is the parts we need of the GCS object metadata. -type bucketObject struct { - Name string `json:"name"` - Hash string `json:"md5Hash"` -} - -// Client is a basic client for fetching versions of the envtest binary archives -// from GCS. -type Client struct { - // Bucket is the bucket to fetch from. - Bucket string - - // Server is the GCS-like storage server - Server string - - // Log allows us to log. 
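`setupEnv` above selects the remote client based on `--use-deprecated-gcs`. A minimal, self-contained sketch of that selection, using only the `GCSClient` and `HTTPClient` fields shown in this patch (the package and function names are ours, not part of the change):

```go
package envtestutil

import (
	"github.com/go-logr/logr"

	"sigs.k8s.io/controller-runtime/tools/setup-envtest/remote"
)

// newRemoteClient mirrors the selection done in setupEnv: the deprecated GCS
// client is only built when explicitly requested, otherwise the index-based
// HTTP client is used (falling back to the default index URL).
func newRemoteClient(log logr.Logger, useDeprecatedGCS bool, bucket, server, index string) remote.Client {
	if useDeprecatedGCS {
		return &remote.GCSClient{ //nolint:staticcheck // deprecated client kept as an escape hatch
			Log:    log.WithName("storage-client"),
			Bucket: bucket,
			Server: server,
		}
	}
	if index == "" {
		index = remote.DefaultIndexURL
	}
	return &remote.HTTPClient{
		Log:      log.WithName("storage-client"),
		IndexURL: index,
	}
}
```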
- Log logr.Logger - - // Insecure uses http for testing - Insecure bool -} - -func (c *Client) scheme() string { - if c.Insecure { - return "http" - } - return "https" -} - -// ListVersions lists all available tools versions in the given bucket, along -// with supported os/arch combos and the corresponding hash. -// -// The results are sorted with newer versions first. -func (c *Client) ListVersions(ctx context.Context) ([]versions.Set, error) { - loc := &url.URL{ - Scheme: c.scheme(), - Host: c.Server, - Path: path.Join("/storage/v1/b/", c.Bucket, "o"), - } - query := make(url.Values) - - knownVersions := map[versions.Concrete][]versions.PlatformItem{} - for cont := true; cont; { - c.Log.V(1).Info("listing bucket to get versions", "bucket", c.Bucket) - - loc.RawQuery = query.Encode() - req, err := http.NewRequestWithContext(ctx, "GET", loc.String(), nil) - if err != nil { - return nil, fmt.Errorf("unable to construct request to list bucket items: %w", err) - } - - resp, err := http.DefaultClient.Do(req) - if err != nil { - return nil, fmt.Errorf("unable to perform request to list bucket items: %w", err) - } - - err = func() error { - defer resp.Body.Close() - if resp.StatusCode != 200 { - return fmt.Errorf("unable list bucket items -- got status %q from GCS", resp.Status) - } - - var list objectList - if err := json.NewDecoder(resp.Body).Decode(&list); err != nil { - return fmt.Errorf("unable unmarshal bucket items list: %w", err) - } - - // continue listing if needed - cont = list.NextPageToken != "" - query.Set("pageToken", list.NextPageToken) - - for _, item := range list.Items { - ver, details := versions.ExtractWithPlatform(versions.ArchiveRE, item.Name) - if ver == nil { - c.Log.V(1).Info("skipping bucket object -- does not appear to be a versioned tools object", "name", item.Name) - continue - } - c.Log.V(1).Info("found version", "version", ver, "platform", details) - knownVersions[*ver] = append(knownVersions[*ver], versions.PlatformItem{ - Platform: details, - MD5: item.Hash, - }) - } - - return nil - }() - if err != nil { - return nil, err - } - } - - res := make([]versions.Set, 0, len(knownVersions)) - for ver, details := range knownVersions { - res = append(res, versions.Set{Version: ver, Platforms: details}) - } - // sort in inverse order so that the newest one is first - sort.Slice(res, func(i, j int) bool { - first, second := res[i].Version, res[j].Version - return first.NewerThan(second) - }) - - return res, nil -} - -// GetVersion downloads the given concrete version for the given concrete platform, writing it to the out. -func (c *Client) GetVersion(ctx context.Context, version versions.Concrete, platform versions.PlatformItem, out io.Writer) error { - itemName := platform.ArchiveName(version) - loc := &url.URL{ - Scheme: c.scheme(), - Host: c.Server, - Path: path.Join("/storage/v1/b/", c.Bucket, "o", itemName), - RawQuery: "alt=media", - } - - req, err := http.NewRequestWithContext(ctx, "GET", loc.String(), nil) - if err != nil { - return fmt.Errorf("unable to construct request to fetch %s: %w", itemName, err) - } - resp, err := http.DefaultClient.Do(req) - if err != nil { - return fmt.Errorf("unable to fetch %s (%s): %w", itemName, req.URL, err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - return fmt.Errorf("unable fetch %s (%s) -- got status %q from GCS", itemName, req.URL, resp.Status) - } - - if platform.MD5 != "" { - // stream in chunks to do the checksum, don't load the whole thing into - // memory to avoid causing issues with big files. 
- buf := make([]byte, 32*1024) // 32KiB, same as io.Copy - checksum := md5.New() //nolint:gosec - for cont := true; cont; { - amt, err := resp.Body.Read(buf) - if err != nil && !errors.Is(err, io.EOF) { - return fmt.Errorf("unable read next chunk of %s: %w", itemName, err) - } - if amt > 0 { - // checksum never returns errors according to docs - checksum.Write(buf[:amt]) - if _, err := out.Write(buf[:amt]); err != nil { - return fmt.Errorf("unable write next chunk of %s: %w", itemName, err) - } - } - cont = amt > 0 && !errors.Is(err, io.EOF) - } - - sum := base64.StdEncoding.EncodeToString(checksum.Sum(nil)) - - if sum != platform.MD5 { - return fmt.Errorf("checksum mismatch for %s: %s (computed) != %s (reported from GCS)", itemName, sum, platform.MD5) - } - } else if _, err := io.Copy(out, resp.Body); err != nil { - return fmt.Errorf("unable to download %s: %w", itemName, err) - } - return nil -} - -// FetchSum fetches the checksum for the given concrete version & platform into -// the given platform item. -func (c *Client) FetchSum(ctx context.Context, ver versions.Concrete, pl *versions.PlatformItem) error { - itemName := pl.ArchiveName(ver) - loc := &url.URL{ - Scheme: c.scheme(), - Host: c.Server, - Path: path.Join("/storage/v1/b/", c.Bucket, "o", itemName), - } - - req, err := http.NewRequestWithContext(ctx, "GET", loc.String(), nil) - if err != nil { - return fmt.Errorf("unable to construct request to fetch metadata for %s: %w", itemName, err) - } - resp, err := http.DefaultClient.Do(req) - if err != nil { - return fmt.Errorf("unable to fetch metadata for %s: %w", itemName, err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - return fmt.Errorf("unable fetch metadata for %s -- got status %q from GCS", itemName, resp.Status) - } +// Client is an interface to get and list envtest binary archives. +type Client interface { + ListVersions(ctx context.Context) ([]versions.Set, error) - var item bucketObject - if err := json.NewDecoder(resp.Body).Decode(&item); err != nil { - return fmt.Errorf("unable to unmarshal metadata for %s: %w", itemName, err) - } + GetVersion(ctx context.Context, version versions.Concrete, platform versions.PlatformItem, out io.Writer) error - pl.MD5 = item.Hash - return nil + FetchSum(ctx context.Context, ver versions.Concrete, pl *versions.PlatformItem) error } diff --git a/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/remote/gcs_client.go b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/remote/gcs_client.go new file mode 100644 index 00000000000..85f321d5c5c --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/remote/gcs_client.go @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package remote + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "path" + "sort" + + "github.com/go-logr/logr" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +// objectList is the parts we need of the GCS "list-objects-in-bucket" endpoint. +type objectList struct { + Items []bucketObject `json:"items"` + NextPageToken string `json:"nextPageToken"` +} + +// bucketObject is the parts we need of the GCS object metadata. +type bucketObject struct { + Name string `json:"name"` + Hash string `json:"md5Hash"` +} + +var _ Client = &GCSClient{} + +// GCSClient is a basic client for fetching versions of the envtest binary archives +// from GCS. +// +// Deprecated: This client is deprecated and will be removed soon. 
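Because `Client` is now an interface rather than a concrete struct, callers can wrap or swap the transport. A small, entirely hypothetical sketch of a memoizing decorator that satisfies `remote.Client` by embedding an existing implementation and only overriding `ListVersions`:

```go
package envtestutil

import (
	"context"
	"sync"

	"sigs.k8s.io/controller-runtime/tools/setup-envtest/remote"
	"sigs.k8s.io/controller-runtime/tools/setup-envtest/versions"
)

// cachingClient memoizes ListVersions; GetVersion and FetchSum are inherited
// unchanged from the embedded remote.Client.
type cachingClient struct {
	remote.Client

	once sync.Once
	sets []versions.Set
	err  error
}

func (c *cachingClient) ListVersions(ctx context.Context) ([]versions.Set, error) {
	c.once.Do(func() {
		c.sets, c.err = c.Client.ListVersions(ctx)
	})
	return c.sets, c.err
}
```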
+// The kubebuilder GCS bucket that we use with this client might be shutdown at any time, +// see: https://github.com/kubernetes/k8s.io/issues/2647. +type GCSClient struct { + // Bucket is the bucket to fetch from. + Bucket string + + // Server is the GCS-like storage server + Server string + + // Log allows us to log. + Log logr.Logger + + // Insecure uses http for testing + Insecure bool +} + +func (c *GCSClient) scheme() string { + if c.Insecure { + return "http" + } + return "https" +} + +// ListVersions lists all available tools versions in the given bucket, along +// with supported os/arch combos and the corresponding hash. +// +// The results are sorted with newer versions first. +func (c *GCSClient) ListVersions(ctx context.Context) ([]versions.Set, error) { + loc := &url.URL{ + Scheme: c.scheme(), + Host: c.Server, + Path: path.Join("/storage/v1/b/", c.Bucket, "o"), + } + query := make(url.Values) + + knownVersions := map[versions.Concrete][]versions.PlatformItem{} + for cont := true; cont; { + c.Log.V(1).Info("listing bucket to get versions", "bucket", c.Bucket) + + loc.RawQuery = query.Encode() + req, err := http.NewRequestWithContext(ctx, "GET", loc.String(), nil) + if err != nil { + return nil, fmt.Errorf("unable to construct request to list bucket items: %w", err) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, fmt.Errorf("unable to perform request to list bucket items: %w", err) + } + + err = func() error { + defer resp.Body.Close() + if resp.StatusCode != 200 { + return fmt.Errorf("unable list bucket items -- got status %q from GCS", resp.Status) + } + + var list objectList + if err := json.NewDecoder(resp.Body).Decode(&list); err != nil { + return fmt.Errorf("unable unmarshal bucket items list: %w", err) + } + + // continue listing if needed + cont = list.NextPageToken != "" + query.Set("pageToken", list.NextPageToken) + + for _, item := range list.Items { + ver, details := versions.ExtractWithPlatform(versions.ArchiveRE, item.Name) + if ver == nil { + c.Log.V(1).Info("skipping bucket object -- does not appear to be a versioned tools object", "name", item.Name) + continue + } + c.Log.V(1).Info("found version", "version", ver, "platform", details) + knownVersions[*ver] = append(knownVersions[*ver], versions.PlatformItem{ + Platform: details, + Hash: &versions.Hash{ + Type: versions.MD5HashType, + Encoding: versions.Base64HashEncoding, + Value: item.Hash, + }, + }) + } + + return nil + }() + if err != nil { + return nil, err + } + } + + res := make([]versions.Set, 0, len(knownVersions)) + for ver, details := range knownVersions { + res = append(res, versions.Set{Version: ver, Platforms: details}) + } + // sort in inverse order so that the newest one is first + sort.Slice(res, func(i, j int) bool { + first, second := res[i].Version, res[j].Version + return first.NewerThan(second) + }) + + return res, nil +} + +// GetVersion downloads the given concrete version for the given concrete platform, writing it to the out. 
+func (c *GCSClient) GetVersion(ctx context.Context, version versions.Concrete, platform versions.PlatformItem, out io.Writer) error { + itemName := platform.ArchiveName(true, version) + loc := &url.URL{ + Scheme: c.scheme(), + Host: c.Server, + Path: path.Join("/storage/v1/b/", c.Bucket, "o", itemName), + RawQuery: "alt=media", + } + + req, err := http.NewRequestWithContext(ctx, "GET", loc.String(), nil) + if err != nil { + return fmt.Errorf("unable to construct request to fetch %s: %w", itemName, err) + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return fmt.Errorf("unable to fetch %s (%s): %w", itemName, req.URL, err) + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return fmt.Errorf("unable fetch %s (%s) -- got status %q from GCS", itemName, req.URL, resp.Status) + } + + return readBody(resp, out, itemName, platform) +} + +// FetchSum fetches the checksum for the given concrete version & platform into +// the given platform item. +func (c *GCSClient) FetchSum(ctx context.Context, ver versions.Concrete, pl *versions.PlatformItem) error { + itemName := pl.ArchiveName(true, ver) + loc := &url.URL{ + Scheme: c.scheme(), + Host: c.Server, + Path: path.Join("/storage/v1/b/", c.Bucket, "o", itemName), + } + + req, err := http.NewRequestWithContext(ctx, "GET", loc.String(), nil) + if err != nil { + return fmt.Errorf("unable to construct request to fetch metadata for %s: %w", itemName, err) + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return fmt.Errorf("unable to fetch metadata for %s: %w", itemName, err) + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return fmt.Errorf("unable fetch metadata for %s -- got status %q from GCS", itemName, resp.Status) + } + + var item bucketObject + if err := json.NewDecoder(resp.Body).Decode(&item); err != nil { + return fmt.Errorf("unable to unmarshal metadata for %s: %w", itemName, err) + } + + pl.Hash = &versions.Hash{ + Type: versions.MD5HashType, + Encoding: versions.Base64HashEncoding, + Value: item.Hash, + } + return nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/remote/http_client.go b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/remote/http_client.go new file mode 100644 index 00000000000..0339654a82e --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/remote/http_client.go @@ -0,0 +1,214 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package remote + +import ( + "context" + "fmt" + "io" + "net/http" + "net/url" + "sort" + + "github.com/go-logr/logr" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" + "sigs.k8s.io/yaml" +) + +// DefaultIndexURL is the default index used in HTTPClient. +var DefaultIndexURL = "https://raw.githubusercontent.com/kubernetes-sigs/controller-tools/HEAD/envtest-releases.yaml" + +var _ Client = &HTTPClient{} + +// HTTPClient is a client for fetching versions of the envtest binary archives +// from an index via HTTP. +type HTTPClient struct { + // Log allows us to log. + Log logr.Logger + + // IndexURL is the URL of the index, defaults to DefaultIndexURL. + IndexURL string +} + +// Index represents an index of envtest binary archives. Example: +// +// releases: +// v1.28.0: +// envtest-v1.28.0-darwin-amd64.tar.gz: +// hash: +// selfLink: +type Index struct { + // Releases maps Kubernetes versions to Releases (envtest archives). + Releases map[string]Release `json:"releases"` +} + +// Release maps an archive name to an archive. 
+type Release map[string]Archive + +// Archive contains the self link to an archive and its hash. +type Archive struct { + Hash string `json:"hash"` + SelfLink string `json:"selfLink"` +} + +// ListVersions lists all available tools versions in the index, along +// with supported os/arch combos and the corresponding hash. +// +// The results are sorted with newer versions first. +func (c *HTTPClient) ListVersions(ctx context.Context) ([]versions.Set, error) { + index, err := c.getIndex(ctx) + if err != nil { + return nil, err + } + + knownVersions := map[versions.Concrete][]versions.PlatformItem{} + for _, releases := range index.Releases { + for archiveName, archive := range releases { + ver, details := versions.ExtractWithPlatform(versions.ArchiveRE, archiveName) + if ver == nil { + c.Log.V(1).Info("skipping archive -- does not appear to be a versioned tools archive", "name", archiveName) + continue + } + c.Log.V(1).Info("found version", "version", ver, "platform", details) + knownVersions[*ver] = append(knownVersions[*ver], versions.PlatformItem{ + Platform: details, + Hash: &versions.Hash{ + Type: versions.SHA512HashType, + Encoding: versions.HexHashEncoding, + Value: archive.Hash, + }, + }) + } + } + + res := make([]versions.Set, 0, len(knownVersions)) + for ver, details := range knownVersions { + res = append(res, versions.Set{Version: ver, Platforms: details}) + } + // sort in inverse order so that the newest one is first + sort.Slice(res, func(i, j int) bool { + first, second := res[i].Version, res[j].Version + return first.NewerThan(second) + }) + + return res, nil +} + +// GetVersion downloads the given concrete version for the given concrete platform, writing it to the out. +func (c *HTTPClient) GetVersion(ctx context.Context, version versions.Concrete, platform versions.PlatformItem, out io.Writer) error { + index, err := c.getIndex(ctx) + if err != nil { + return err + } + + var loc *url.URL + var name string + for _, releases := range index.Releases { + for archiveName, archive := range releases { + ver, details := versions.ExtractWithPlatform(versions.ArchiveRE, archiveName) + if ver == nil { + c.Log.V(1).Info("skipping archive -- does not appear to be a versioned tools archive", "name", archiveName) + continue + } + + if *ver == version && details.OS == platform.OS && details.Arch == platform.Arch { + loc, err = url.Parse(archive.SelfLink) + if err != nil { + return fmt.Errorf("error parsing selfLink %q, %w", loc, err) + } + name = archiveName + break + } + } + } + if name == "" { + return fmt.Errorf("unable to find archive for %s (%s,%s)", version, platform.OS, platform.Arch) + } + + req, err := http.NewRequestWithContext(ctx, "GET", loc.String(), nil) + if err != nil { + return fmt.Errorf("unable to construct request to fetch %s: %w", name, err) + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return fmt.Errorf("unable to fetch %s (%s): %w", name, req.URL, err) + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return fmt.Errorf("unable fetch %s (%s) -- got status %q", name, req.URL, resp.Status) + } + + return readBody(resp, out, name, platform) +} + +// FetchSum fetches the checksum for the given concrete version & platform into +// the given platform item. 
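The `Index`/`Release`/`Archive` types above mirror controller-tools' `envtest-releases.yaml`. A sketch of what such an index looks like and how it decodes; the version, hash, and URL values are made up:

```go
package main

import (
	"fmt"

	"sigs.k8s.io/controller-runtime/tools/setup-envtest/remote"
	"sigs.k8s.io/yaml"
)

// sampleIndex is a made-up document in the shape consumed by HTTPClient.
const sampleIndex = `
releases:
  v1.30.0:
    envtest-v1.30.0-linux-amd64.tar.gz:
      hash: 0123abcd0123abcd   # sha512 of the archive, hex encoded (shortened here)
      selfLink: https://example.internal/envtest-v1.30.0-linux-amd64.tar.gz
`

func main() {
	var idx remote.Index
	if err := yaml.Unmarshal([]byte(sampleIndex), &idx); err != nil {
		panic(err)
	}
	for name, archive := range idx.Releases["v1.30.0"] {
		fmt.Printf("%s -> %s (sha512: %s)\n", name, archive.SelfLink, archive.Hash)
	}
}
```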
+func (c *HTTPClient) FetchSum(ctx context.Context, version versions.Concrete, platform *versions.PlatformItem) error { + index, err := c.getIndex(ctx) + if err != nil { + return err + } + + for _, releases := range index.Releases { + for archiveName, archive := range releases { + ver, details := versions.ExtractWithPlatform(versions.ArchiveRE, archiveName) + if ver == nil { + c.Log.V(1).Info("skipping archive -- does not appear to be a versioned tools archive", "name", archiveName) + continue + } + + if *ver == version && details.OS == platform.OS && details.Arch == platform.Arch { + platform.Hash = &versions.Hash{ + Type: versions.SHA512HashType, + Encoding: versions.HexHashEncoding, + Value: archive.Hash, + } + return nil + } + } + } + + return fmt.Errorf("unable to find archive for %s (%s,%s)", version, platform.OS, platform.Arch) +} + +func (c *HTTPClient) getIndex(ctx context.Context) (*Index, error) { + indexURL := c.IndexURL + if indexURL == "" { + indexURL = DefaultIndexURL + } + + loc, err := url.Parse(indexURL) + if err != nil { + return nil, fmt.Errorf("unable to parse index URL: %w", err) + } + + c.Log.V(1).Info("listing versions", "index", indexURL) + + req, err := http.NewRequestWithContext(ctx, "GET", loc.String(), nil) + if err != nil { + return nil, fmt.Errorf("unable to construct request to get index: %w", err) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, fmt.Errorf("unable to perform request to get index: %w", err) + } + + defer resp.Body.Close() + if resp.StatusCode != 200 { + return nil, fmt.Errorf("unable to get index -- got status %q", resp.Status) + } + + responseBody, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("unable to get index -- unable to read body %w", err) + } + + var index Index + if err := yaml.Unmarshal(responseBody, &index); err != nil { + return nil, fmt.Errorf("unable to unmarshal index: %w", err) + } + return &index, nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/remote/read_body.go b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/remote/read_body.go new file mode 100644 index 00000000000..650e41282c9 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/remote/read_body.go @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2024 The Kubernetes Authors + +package remote + +import ( + //nolint:gosec // We're aware that md5 is a weak cryptographic primitive, but we don't have a choice here. + "crypto/md5" + "crypto/sha512" + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "hash" + "io" + "net/http" + + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +func readBody(resp *http.Response, out io.Writer, archiveName string, platform versions.PlatformItem) error { + if platform.Hash != nil { + // stream in chunks to do the checksum, don't load the whole thing into + // memory to avoid causing issues with big files. + buf := make([]byte, 32*1024) // 32KiB, same as io.Copy + var hasher hash.Hash + switch platform.Hash.Type { + case versions.SHA512HashType: + hasher = sha512.New() + case versions.MD5HashType: + hasher = md5.New() //nolint:gosec // We're aware that md5 is a weak cryptographic primitive, but we don't have a choice here. 
+ default: + return fmt.Errorf("hash type %s not implemented", platform.Hash.Type) + } + for cont := true; cont; { + amt, err := resp.Body.Read(buf) + if err != nil && !errors.Is(err, io.EOF) { + return fmt.Errorf("unable read next chunk of %s: %w", archiveName, err) + } + if amt > 0 { + // checksum never returns errors according to docs + hasher.Write(buf[:amt]) + if _, err := out.Write(buf[:amt]); err != nil { + return fmt.Errorf("unable write next chunk of %s: %w", archiveName, err) + } + } + cont = amt > 0 && !errors.Is(err, io.EOF) + } + + var sum string + switch platform.Hash.Encoding { + case versions.Base64HashEncoding: + sum = base64.StdEncoding.EncodeToString(hasher.Sum(nil)) + case versions.HexHashEncoding: + sum = hex.EncodeToString(hasher.Sum(nil)) + default: + return fmt.Errorf("hash encoding %s not implemented", platform.Hash.Encoding) + } + if sum != platform.Hash.Value { + return fmt.Errorf("checksum mismatch for %s: %s (computed) != %s (reported)", archiveName, sum, platform.Hash.Value) + } + } else if _, err := io.Copy(out, resp.Body); err != nil { + return fmt.Errorf("unable to download %s: %w", archiveName, err) + } + return nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/store/store.go b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/store/store.go index e6f258e4ac7..6001eb2a4ee 100644 --- a/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/store/store.go +++ b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/store/store.go @@ -182,7 +182,7 @@ func (s *Store) Add(ctx context.Context, item Item, contents io.Reader) (resErr return err } } - if err != nil && !errors.Is(err, io.EOF) { + if err != nil && !errors.Is(err, io.EOF) { //nolint:govet return fmt.Errorf("unable to finish un-tar-ing the downloaded archive: %w", err) } log.V(1).Info("unpacked archive") diff --git a/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/versions/parse.go b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/versions/parse.go index c053bf87574..21d38bb345b 100644 --- a/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/versions/parse.go +++ b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/versions/parse.go @@ -17,8 +17,6 @@ var ( // ConcreteVersionRE matches a concrete version anywhere in the string. ConcreteVersionRE = regexp.MustCompile(`(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)`) - // OnlyConcreteVersionRE matches a string that's just a concrete version. - OnlyConcreteVersionRE = regexp.MustCompile(`^` + ConcreteVersionRE.String() + `$`) ) // FromExpr extracts a version from a string in the form of a semver version, diff --git a/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/versions/platform.go b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/versions/platform.go index 16c08b38ffc..8b32ccd5bcc 100644 --- a/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/versions/platform.go +++ b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/versions/platform.go @@ -37,17 +37,58 @@ func (p Platform) BaseName(ver Concrete) string { } // ArchiveName returns the full archive name for this version and platform. -func (p Platform) ArchiveName(ver Concrete) string { - return "kubebuilder-tools-" + p.BaseName(ver) + ".tar.gz" +// useGCS is deprecated and will be removed when the remote.GCSClient is removed. 
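`readBody` above streams the download through the hasher in 32 KiB chunks. For a quick offline check of an already-downloaded archive against the sha512/hex value recorded in an index, a non-streaming sketch (file name and expected hash are placeholders) could look like:

```go
package main

import (
	"crypto/sha512"
	"encoding/hex"
	"fmt"
	"os"
)

func main() {
	// Read the whole archive for brevity; readBody itself streams instead.
	data, err := os.ReadFile("envtest-v1.30.0-linux-amd64.tar.gz")
	if err != nil {
		panic(err)
	}
	sum := sha512.Sum512(data)
	got := hex.EncodeToString(sum[:])

	expected := "<hash value from envtest-releases.yaml>"
	if got != expected {
		fmt.Printf("checksum mismatch: %s (computed) != %s (reported)\n", got, expected)
		os.Exit(1)
	}
	fmt.Println("checksum OK")
}
```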
+func (p Platform) ArchiveName(useGCS bool, ver Concrete) string { + if useGCS { + return "kubebuilder-tools-" + p.BaseName(ver) + ".tar.gz" + } + return "envtest-v" + p.BaseName(ver) + ".tar.gz" } // PlatformItem represents a platform with corresponding // known metadata for its download. type PlatformItem struct { Platform - MD5 string + + *Hash } +// Hash of an archive with envtest binaries. +type Hash struct { + // Type of the hash. + // GCS uses MD5HashType, controller-tools uses SHA512HashType. + Type HashType + + // Encoding of the hash value. + // GCS uses Base64HashEncoding, controller-tools uses HexHashEncoding. + Encoding HashEncoding + + // Value of the hash. + Value string +} + +// HashType is the type of a hash. +type HashType string + +const ( + // SHA512HashType represents a sha512 hash + SHA512HashType HashType = "sha512" + + // MD5HashType represents a md5 hash + MD5HashType HashType = "md5" +) + +// HashEncoding is the encoding of a hash +type HashEncoding string + +const ( + // Base64HashEncoding represents base64 encoding + Base64HashEncoding HashEncoding = "base64" + + // HexHashEncoding represents hex encoding + HexHashEncoding HashEncoding = "hex" +) + // Set is a concrete version and all the corresponding platforms that it's available for. type Set struct { Version Concrete @@ -81,5 +122,7 @@ var ( // VersionPlatformRE matches concrete version-platform strings. VersionPlatformRE = regexp.MustCompile(`^` + versionPlatformREBase + `$`) // ArchiveRE matches concrete version-platform.tar.gz strings. - ArchiveRE = regexp.MustCompile(`^kubebuilder-tools-` + versionPlatformREBase + `\.tar\.gz$`) + // The archives published to GCS by kubebuilder use the "kubebuilder-tools-" prefix (e.g. "kubebuilder-tools-1.30.0-darwin-amd64.tar.gz"). + // The archives published to GitHub releases by controller-tools use the "envtest-v" prefix (e.g. "envtest-v1.30.0-darwin-amd64.tar.gz"). + ArchiveRE = regexp.MustCompile(`^(kubebuilder-tools-|envtest-v)` + versionPlatformREBase + `\.tar\.gz$`) ) diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/gen.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/gen.go index 546ba9a6d72..ac8eb566da5 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/crd/gen.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/gen.go @@ -85,6 +85,20 @@ type Generator struct { // Year specifies the year to substitute for " YEAR" in the header file. Year string `marker:",optional"` + + // DeprecatedV1beta1CompatibilityPreserveUnknownFields indicates whether + // or not we should turn off field pruning for this resource. + // + // Specifies spec.preserveUnknownFields value that is false and omitted by default. + // This value can only be specified for CustomResourceDefinitions that were created with + // `apiextensions.k8s.io/v1beta1`. + // + // The field can be set for compatiblity reasons, although strongly discouraged, resource + // authors should move to a structural OpenAPI schema instead. + // + // See https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#field-pruning + // for more information about field pruning and v1beta1 resources compatibility. + DeprecatedV1beta1CompatibilityPreserveUnknownFields *bool `marker:",optional"` } func (Generator) CheckFilter() loader.NodeFilter { @@ -100,15 +114,25 @@ func transformRemoveCRDStatus(obj map[string]interface{}) error { return nil } +// transformPreserveUnknownFields adds spec.preserveUnknownFields=value. 
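The updated `ArchiveRE` and `ArchiveName` cover both naming schemes. A short sketch that round-trips an archive name through `ExtractWithPlatform` and `ArchiveName` (the concrete file name is just an example):

```go
package main

import (
	"fmt"

	"sigs.k8s.io/controller-runtime/tools/setup-envtest/versions"
)

func main() {
	name := "envtest-v1.30.0-linux-amd64.tar.gz"

	ver, plat := versions.ExtractWithPlatform(versions.ArchiveRE, name)
	if ver == nil {
		panic("not a recognized envtest archive name")
	}

	// controller-tools (GitHub releases) naming vs. the deprecated GCS naming.
	fmt.Println(plat.ArchiveName(false, *ver)) // envtest-v1.30.0-linux-amd64.tar.gz
	fmt.Println(plat.ArchiveName(true, *ver))  // kubebuilder-tools-1.30.0-linux-amd64.tar.gz
}
```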
+func transformPreserveUnknownFields(value bool) func(map[string]interface{}) error { + return func(obj map[string]interface{}) error { + if spec, ok := obj["spec"].(map[interface{}]interface{}); ok { + spec["preserveUnknownFields"] = value + } + return nil + } +} + func (g Generator) Generate(ctx *genall.GenerationContext) error { parser := &Parser{ Collector: ctx.Collector, Checker: ctx.Checker, // Perform defaulting here to avoid ambiguity later - IgnoreUnexportedFields: g.IgnoreUnexportedFields != nil && *g.IgnoreUnexportedFields == true, - AllowDangerousTypes: g.AllowDangerousTypes != nil && *g.AllowDangerousTypes == true, + IgnoreUnexportedFields: g.IgnoreUnexportedFields != nil && *g.IgnoreUnexportedFields, + AllowDangerousTypes: g.AllowDangerousTypes != nil && *g.AllowDangerousTypes, // Indicates the parser on whether to register the ObjectMeta type or not - GenerateEmbeddedObjectMeta: g.GenerateEmbeddedObjectMeta != nil && *g.GenerateEmbeddedObjectMeta == true, + GenerateEmbeddedObjectMeta: g.GenerateEmbeddedObjectMeta != nil && *g.GenerateEmbeddedObjectMeta, } AddKnownTypes(parser) @@ -146,6 +170,14 @@ func (g Generator) Generate(ctx *genall.GenerationContext) error { } headerText = strings.ReplaceAll(headerText, " YEAR", " "+g.Year) + yamlOpts := []*genall.WriteYAMLOptions{ + genall.WithTransform(transformRemoveCRDStatus), + genall.WithTransform(genall.TransformRemoveCreationTimestamp), + } + if g.DeprecatedV1beta1CompatibilityPreserveUnknownFields != nil { + yamlOpts = append(yamlOpts, genall.WithTransform(transformPreserveUnknownFields(*g.DeprecatedV1beta1CompatibilityPreserveUnknownFields))) + } + for _, groupKind := range kubeKinds { parser.NeedCRDFor(groupKind, g.MaxDescLen) crdRaw := parser.CustomResourceDefinitions[groupKind] @@ -171,7 +203,7 @@ func (g Generator) Generate(ctx *genall.GenerationContext) error { } else { fileName = fmt.Sprintf("%s_%s.%s.yaml", crdRaw.Spec.Group, crdRaw.Spec.Names.Plural, crdVersions[i]) } - if err := ctx.WriteYAML(fileName, headerText, []interface{}{crd}, genall.WithTransform(transformRemoveCRDStatus), genall.WithTransform(genall.TransformRemoveCreationTimestamp)); err != nil { + if err := ctx.WriteYAML(fileName, headerText, []interface{}{crd}, yamlOpts...); err != nil { return err } } @@ -194,7 +226,6 @@ func removeDescriptionFromMetadataProps(v *apiext.JSONSchemaProps) { if meta.Description != "" { meta.Description = "" v.Properties["metadata"] = m - } } } diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/doc.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/doc.go index 995af44b371..f01e9f1b316 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/doc.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/doc.go @@ -26,7 +26,20 @@ limitations under the License. // be run after the rest of a given schema node has been generated. // Markers that need to be run before any other markers can also // implement ApplyFirst, but this is discouraged and may change -// in the future. +// in the future. It is recommended to implement the ApplyPriority +// interface in combination with ApplyPriorityDefault and +// ApplyPriorityFirst constants. Following is an example of how to +// implement such a marker: +// +// type MyCustomMarker string +// +// func (m MyCustomMarker) ApplyPriority() ApplyPriority { +// return ApplyPriorityFirst +// } +// +// func (m MyCustomMarker) ApplyToSchema(schema *apiext.JSONSchemaProps) error { +// ... 
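The new `DeprecatedV1beta1CompatibilityPreserveUnknownFields` option is a `*bool`, so `spec.preserveUnknownFields` is only emitted when the option is explicitly set. A sketch of setting it when constructing the generator from Go; on the controller-gen command line the option would presumably be spelled `crd:deprecatedV1beta1CompatibilityPreserveUnknownFields=false`, following the usual lowerCamelCase mapping of generator fields (an assumption, not stated in this patch):

```go
package main

import (
	"fmt"

	"sigs.k8s.io/controller-tools/pkg/crd"
)

// boolPtr is a local helper; the generator takes the option as a *bool so it
// can distinguish "unset" from an explicit false.
func boolPtr(b bool) *bool { return &b }

func main() {
	gen := crd.Generator{
		DeprecatedV1beta1CompatibilityPreserveUnknownFields: boolPtr(false),
	}
	fmt.Printf("spec.preserveUnknownFields will be emitted: %v\n",
		gen.DeprecatedV1beta1CompatibilityPreserveUnknownFields != nil)
}
```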
+// } // // All validation markers start with "+kubebuilder:validation", and // have the same name as their type name. @@ -34,7 +47,7 @@ limitations under the License. // # CRD Markers // // Markers that modify anything in the CRD itself *except* for the schema -// implement ApplyToCRD (crd.CRDMarker). They are expected to detect whether +// implement ApplyToCRD (crd.SpecMarker). They are expected to detect whether // they should apply themselves to a specific version in the CRD (as passed to // them), or to the root-level CRD for legacy cases. They are applied *after* // the rest of the CRD is computed. diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/priority.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/priority.go new file mode 100644 index 00000000000..1b448252128 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/priority.go @@ -0,0 +1,37 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package markers + +// ApplyPriority designates the order markers should be applied. +// Lower priority indicates it should be applied first +type ApplyPriority int64 + +const ( + // ApplyPriorityDefault is the default priority for markers + // that don't implement ApplyPriorityMarker + ApplyPriorityDefault ApplyPriority = 10 + + // ApplyPriorityFirst is the priority value assigned to markers + // that implement the ApplyFirst() method + ApplyPriorityFirst ApplyPriority = 1 +) + +// ApplyPriorityMarker designates the order validation markers should be applied. 
+// Lower priority indicates it should be applied first +type ApplyPriorityMarker interface { + ApplyPriority() ApplyPriority +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/register.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/register.go index 0e7c426942c..5988a1b5b1e 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/register.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/register.go @@ -42,6 +42,14 @@ func (d *definitionWithHelp) Register(reg *markers.Registry) error { return nil } +func (d *definitionWithHelp) clone() *definitionWithHelp { + newDef, newHelp := *d.Definition, *d.Help + return &definitionWithHelp{ + Definition: &newDef, + Help: &newHelp, + } +} + func must(def *markers.Definition, err error) *definitionWithHelp { return &definitionWithHelp{ Definition: markers.Must(def, err), @@ -60,7 +68,7 @@ type hasHelp interface { func mustMakeAllWithPrefix(prefix string, target markers.TargetType, objs ...interface{}) []*definitionWithHelp { defs := make([]*definitionWithHelp, len(objs)) for i, obj := range objs { - name := prefix + ":" + reflect.TypeOf(obj).Name() + name := prefix + reflect.TypeOf(obj).Name() def, err := markers.MakeDefinition(name, target, obj) if err != nil { panic(err) diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/topology.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/topology.go index a92995c802d..97dbc47c591 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/topology.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/topology.go @@ -119,7 +119,9 @@ func (l ListType) ApplyToSchema(schema *apiext.JSONSchemaProps) error { return nil } -func (l ListType) ApplyFirst() {} +func (l ListType) ApplyPriority() ApplyPriority { + return ApplyPriorityDefault - 1 +} func (l ListMapKey) ApplyToSchema(schema *apiext.JSONSchemaProps) error { if schema.Type != "array" { diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/validation.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/validation.go index 40aec5f9423..245c5ef928e 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/validation.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/validation.go @@ -20,6 +20,7 @@ import ( "encoding/json" "fmt" "math" + "strings" apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" @@ -27,7 +28,10 @@ import ( ) const ( - SchemalessName = "kubebuilder:validation:Schemaless" + validationPrefix = "kubebuilder:validation:" + + SchemalessName = "kubebuilder:validation:Schemaless" + ValidationItemsPrefix = validationPrefix + "items:" ) // ValidationMarkers lists all available markers that affect CRD schema generation, @@ -35,7 +39,9 @@ const ( // All markers start with `+kubebuilder:validation:`, and continue with their type name. // A copy is produced of all markers that describes types as well, for making types // reusable and writing complex validations on slice items. -var ValidationMarkers = mustMakeAllWithPrefix("kubebuilder:validation", markers.DescribesField, +// At last a copy of all markers with the prefix `+kubebuilder:validation:items:` is +// produced for marking slice fields and types. +var ValidationMarkers = mustMakeAllWithPrefix(validationPrefix, markers.DescribesField, // numeric markers @@ -110,14 +116,22 @@ func init() { AllDefinitions = append(AllDefinitions, ValidationMarkers...) 
for _, def := range ValidationMarkers { - newDef := *def.Definition - // copy both parts so we don't change the definition - typDef := definitionWithHelp{ - Definition: &newDef, - Help: def.Help, - } + typDef := def.clone() typDef.Target = markers.DescribesType - AllDefinitions = append(AllDefinitions, &typDef) + AllDefinitions = append(AllDefinitions, typDef) + + itemsName := ValidationItemsPrefix + strings.TrimPrefix(def.Name, validationPrefix) + + itemsFieldDef := def.clone() + itemsFieldDef.Name = itemsName + itemsFieldDef.Help.Summary = "for array items " + itemsFieldDef.Help.Summary + AllDefinitions = append(AllDefinitions, itemsFieldDef) + + itemsTypDef := def.clone() + itemsTypDef.Name = itemsName + itemsTypDef.Help.Summary = "for array items " + itemsTypDef.Help.Summary + itemsTypDef.Target = markers.DescribesType + AllDefinitions = append(AllDefinitions, itemsTypDef) } AllDefinitions = append(AllDefinitions, FieldOnlyMarkers...) @@ -295,8 +309,9 @@ func isIntegral(value float64) bool { // This marker may be repeated to specify multiple expressions, all of // which must evaluate to true. type XValidation struct { - Rule string - Message string `marker:",optional"` + Rule string + Message string `marker:",optional"` + MessageExpression string `marker:"messageExpression,optional"` } func (m Maximum) ApplyToSchema(schema *apiext.JSONSchemaProps) error { @@ -464,7 +479,9 @@ func (m Type) ApplyToSchema(schema *apiext.JSONSchemaProps) error { return nil } -func (m Type) ApplyFirst() {} +func (m Type) ApplyPriority() ApplyPriority { + return ApplyPriorityDefault - 1 +} func (m Nullable) ApplyToSchema(schema *apiext.JSONSchemaProps) error { schema.Nullable = true @@ -512,12 +529,15 @@ func (m XIntOrString) ApplyToSchema(schema *apiext.JSONSchemaProps) error { return nil } -func (m XIntOrString) ApplyFirst() {} +func (m XIntOrString) ApplyPriority() ApplyPriority { + return ApplyPriorityDefault - 1 +} func (m XValidation) ApplyToSchema(schema *apiext.JSONSchemaProps) error { schema.XValidations = append(schema.XValidations, apiext.ValidationRule{ - Rule: m.Rule, - Message: m.Message, + Rule: m.Rule, + Message: m.Message, + MessageExpression: m.MessageExpression, }) return nil } diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/zz_generated.markerhelp.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/zz_generated.markerhelp.go index d8a910824db..c10ffb2fcc4 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/zz_generated.markerhelp.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/zz_generated.markerhelp.go @@ -28,8 +28,8 @@ func (Default) Help() *markers.DefinitionHelp { return &markers.DefinitionHelp{ Category: "CRD validation", DetailedHelp: markers.DetailedHelp{ - Summary: "sets the default value for this field. ", - Details: "A default value will be accepted as any value valid for the field. Formatting for common types include: boolean: `true`, string: `Cluster`, numerical: `1.24`, array: `{1,2}`, object: `{policy: \"delete\"}`). Defaults should be defined in pruned form, and only best-effort validation will be performed. Full validation of a default requires submission of the containing CRD to an apiserver.", + Summary: "sets the default value for this field.", + Details: "A default value will be accepted as any value valid for the\nfield. Formatting for common types include: boolean: `true`, string:\n`Cluster`, numerical: `1.24`, array: `{1,2}`, object: `{policy:\n\"delete\"}`). 
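Taken together, the `items:`-prefixed clones and the new `MessageExpression` argument allow markers like the ones below on a Go API type (the type and field names are illustrative only, not taken from this patch):

```go
package v1alpha1

// WidgetSpec shows the new marker forms: the items: prefix applies a
// validation to each slice element rather than to the slice itself, and
// messageExpression supplies a CEL expression for the XValidation failure
// message.
type WidgetSpec struct {
	// +kubebuilder:validation:MaxItems=10
	// +kubebuilder:validation:items:MaxLength=63
	Tags []string `json:"tags,omitempty"`

	// +kubebuilder:validation:XValidation:rule="self <= 10",messageExpression="'replicas must be at most 10, got ' + string(self)"
	Replicas int32 `json:"replicas"`
}
```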
Defaults should be defined in pruned form, and only best-effort\nvalidation will be performed. Full validation of a default requires\nsubmission of the containing CRD to an apiserver.", }, FieldHelp: map[string]markers.DetailedHelp{ "Value": { @@ -71,8 +71,8 @@ func (Example) Help() *markers.DefinitionHelp { return &markers.DefinitionHelp{ Category: "CRD validation", DetailedHelp: markers.DetailedHelp{ - Summary: "sets the example value for this field. ", - Details: "An example value will be accepted as any value valid for the field. Formatting for common types include: boolean: `true`, string: `Cluster`, numerical: `1.24`, array: `{1,2}`, object: `{policy: \"delete\"}`). Examples should be defined in pruned form, and only best-effort validation will be performed. Full validation of an example requires submission of the containing CRD to an apiserver.", + Summary: "sets the example value for this field.", + Details: "An example value will be accepted as any value valid for the\nfield. Formatting for common types include: boolean: `true`, string:\n`Cluster`, numerical: `1.24`, array: `{1,2}`, object: `{policy:\n\"delete\"}`). Examples should be defined in pruned form, and only best-effort\nvalidation will be performed. Full validation of an example requires\nsubmission of the containing CRD to an apiserver.", }, FieldHelp: map[string]markers.DetailedHelp{ "Value": { @@ -109,8 +109,8 @@ func (Format) Help() *markers.DefinitionHelp { return &markers.DefinitionHelp{ Category: "CRD validation", DetailedHelp: markers.DetailedHelp{ - Summary: "specifies additional \"complex\" formatting for this field. ", - Details: "For example, a date-time field would be marked as \"type: string\" and \"format: date-time\".", + Summary: "specifies additional \"complex\" formatting for this field.", + Details: "For example, a date-time field would be marked as \"type: string\" and\n\"format: date-time\".", }, FieldHelp: map[string]markers.DetailedHelp{}, } @@ -120,8 +120,8 @@ func (ListMapKey) Help() *markers.DefinitionHelp { return &markers.DefinitionHelp{ Category: "CRD processing", DetailedHelp: markers.DetailedHelp{ - Summary: "specifies the keys to map listTypes. ", - Details: "It indicates the index of a map list. They can be repeated if multiple keys must be used. It can only be used when ListType is set to map, and the keys should be scalar types.", + Summary: "specifies the keys to map listTypes.", + Details: "It indicates the index of a map list. They can be repeated if multiple keys\nmust be used. It can only be used when ListType is set to map, and the keys\nshould be scalar types.", }, FieldHelp: map[string]markers.DetailedHelp{}, } @@ -131,8 +131,8 @@ func (ListType) Help() *markers.DefinitionHelp { return &markers.DefinitionHelp{ Category: "CRD processing", DetailedHelp: markers.DetailedHelp{ - Summary: "specifies the type of data-structure that the list represents (map, set, atomic). ", - Details: "Possible data-structure types of a list are: \n - \"map\": it needs to have a key field, which will be used to build an associative list. A typical example is a the pod container list, which is indexed by the container name. \n - \"set\": Fields need to be \"scalar\", and there can be only one occurrence of each. 
\n - \"atomic\": All the fields in the list are treated as a single value, are typically manipulated together by the same actor.", + Summary: "specifies the type of data-structure that the list", + Details: "represents (map, set, atomic).\n\n\nPossible data-structure types of a list are:\n\n\n - \"map\": it needs to have a key field, which will be used to build an\n associative list. A typical example is a the pod container list,\n which is indexed by the container name.\n\n\n - \"set\": Fields need to be \"scalar\", and there can be only one\n occurrence of each.\n\n\n - \"atomic\": All the fields in the list are treated as a single value,\n are typically manipulated together by the same actor.", }, FieldHelp: map[string]markers.DetailedHelp{}, } @@ -142,8 +142,8 @@ func (MapType) Help() *markers.DefinitionHelp { return &markers.DefinitionHelp{ Category: "CRD processing", DetailedHelp: markers.DetailedHelp{ - Summary: "specifies the level of atomicity of the map; i.e. whether each item in the map is independent of the others, or all fields are treated as a single unit. ", - Details: "Possible values: \n - \"granular\": items in the map are independent of each other, and can be manipulated by different actors. This is the default behavior. \n - \"atomic\": all fields are treated as one unit. Any changes have to replace the entire map.", + Summary: "specifies the level of atomicity of the map;", + Details: "i.e. whether each item in the map is independent of the others,\nor all fields are treated as a single unit.\n\n\nPossible values:\n\n\n - \"granular\": items in the map are independent of each other,\n and can be manipulated by different actors.\n This is the default behavior.\n\n\n - \"atomic\": all fields are treated as one unit.\n Any changes have to replace the entire map.", }, FieldHelp: map[string]markers.DetailedHelp{}, } @@ -197,8 +197,8 @@ func (Metadata) Help() *markers.DefinitionHelp { return &markers.DefinitionHelp{ Category: "CRD", DetailedHelp: markers.DetailedHelp{ - Summary: "configures the additional annotations or labels for this CRD. For example adding annotation \"api-approved.kubernetes.io\" for a CRD with Kubernetes groups, or annotation \"cert-manager.io/inject-ca-from-secret\" for a CRD that needs CA injection.", - Details: "", + Summary: "configures the additional annotations or labels for this CRD.", + Details: "For example adding annotation \"api-approved.kubernetes.io\" for a CRD with Kubernetes groups,\nor annotation \"cert-manager.io/inject-ca-from-secret\" for a CRD that needs CA injection.", }, FieldHelp: map[string]markers.DetailedHelp{ "Annotations": { @@ -272,7 +272,7 @@ func (Nullable) Help() *markers.DefinitionHelp { return &markers.DefinitionHelp{ Category: "CRD validation", DetailedHelp: markers.DetailedHelp{ - Summary: "marks this field as allowing the \"null\" value. ", + Summary: "marks this field as allowing the \"null\" value.", Details: "This is often not necessary, but may be helpful with custom serialization.", }, FieldHelp: map[string]markers.DetailedHelp{}, @@ -303,8 +303,8 @@ func (PrintColumn) Help() *markers.DefinitionHelp { Details: "", }, "Type": { - Summary: "indicates the type of the column. 
", - Details: "It may be any OpenAPI data type listed at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types.", + Summary: "indicates the type of the column.", + Details: "It may be any OpenAPI data type listed at\nhttps://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types.", }, "JSONPath": { Summary: "specifies the jsonpath expression used to extract the value of the column.", @@ -315,12 +315,12 @@ func (PrintColumn) Help() *markers.DefinitionHelp { Details: "", }, "Format": { - Summary: "specifies the format of the column. ", - Details: "It may be any OpenAPI data format corresponding to the type, listed at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types.", + Summary: "specifies the format of the column.", + Details: "It may be any OpenAPI data format corresponding to the type, listed at\nhttps://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types.", }, "Priority": { - Summary: "indicates how important it is that this column be displayed. ", - Details: "Lower priority (*higher* numbered) columns will be hidden if the terminal width is too small.", + Summary: "indicates how important it is that this column be displayed.", + Details: "Lower priority (*higher* numbered) columns will be hidden if the terminal\nwidth is too small.", }, }, } @@ -335,24 +335,24 @@ func (Resource) Help() *markers.DefinitionHelp { }, FieldHelp: map[string]markers.DetailedHelp{ "Path": { - Summary: "specifies the plural \"resource\" for this CRD. ", - Details: "It generally corresponds to a plural, lower-cased version of the Kind. See https://book.kubebuilder.io/cronjob-tutorial/gvks.html.", + Summary: "specifies the plural \"resource\" for this CRD.", + Details: "It generally corresponds to a plural, lower-cased version of the Kind.\nSee https://book.kubebuilder.io/cronjob-tutorial/gvks.html.", }, "ShortName": { - Summary: "specifies aliases for this CRD. ", - Details: "Short names are often used when people have work with your resource over and over again. For instance, \"rs\" for \"replicaset\" or \"crd\" for customresourcedefinition.", + Summary: "specifies aliases for this CRD.", + Details: "Short names are often used when people have work with your resource\nover and over again. For instance, \"rs\" for \"replicaset\" or\n\"crd\" for customresourcedefinition.", }, "Categories": { - Summary: "specifies which group aliases this resource is part of. ", - Details: "Group aliases are used to work with groups of resources at once. The most common one is \"all\" which covers about a third of the base resources in Kubernetes, and is generally used for \"user-facing\" resources.", + Summary: "specifies which group aliases this resource is part of.", + Details: "Group aliases are used to work with groups of resources at once.\nThe most common one is \"all\" which covers about a third of the base\nresources in Kubernetes, and is generally used for \"user-facing\" resources.", }, "Singular": { - Summary: "overrides the singular form of your resource. ", + Summary: "overrides the singular form of your resource.", Details: "The singular form is otherwise defaulted off the plural (path).", }, "Scope": { - Summary: "overrides the scope of the CRD (Cluster vs Namespaced). ", - Details: "Scope defaults to \"Namespaced\". Cluster-scoped (\"Cluster\") resources don't exist in namespaces.", + Summary: "overrides the scope of the CRD (Cluster vs Namespaced).", + Details: "Scope defaults to \"Namespaced\". 
Cluster-scoped (\"Cluster\") resources\ndon't exist in namespaces.", }, }, } @@ -362,8 +362,8 @@ func (Schemaless) Help() *markers.DefinitionHelp { return &markers.DefinitionHelp{ Category: "CRD validation", DetailedHelp: markers.DetailedHelp{ - Summary: "marks a field as being a schemaless object. ", - Details: "Schemaless objects are not introspected, so you must provide any type and validation information yourself. One use for this tag is for embedding fields that hold JSONSchema typed objects. Because this field disables all type checking, it is recommended to be used only as a last resort.", + Summary: "marks a field as being a schemaless object.", + Details: "Schemaless objects are not introspected, so you must provide\nany type and validation information yourself. One use for this\ntag is for embedding fields that hold JSONSchema typed objects.\nBecause this field disables all type checking, it is recommended\nto be used only as a last resort.", }, FieldHelp: map[string]markers.DetailedHelp{}, } @@ -373,8 +373,8 @@ func (SkipVersion) Help() *markers.DefinitionHelp { return &markers.DefinitionHelp{ Category: "CRD", DetailedHelp: markers.DetailedHelp{ - Summary: "removes the particular version of the CRD from the CRDs spec. ", - Details: "This is useful if you need to skip generating and listing version entries for 'internal' resource versions, which typically exist if using the Kubernetes upstream conversion-gen tool.", + Summary: "removes the particular version of the CRD from the CRDs spec.", + Details: "This is useful if you need to skip generating and listing version entries\nfor 'internal' resource versions, which typically exist if using the\nKubernetes upstream conversion-gen tool.", }, FieldHelp: map[string]markers.DetailedHelp{}, } @@ -384,8 +384,8 @@ func (StorageVersion) Help() *markers.DefinitionHelp { return &markers.DefinitionHelp{ Category: "CRD", DetailedHelp: markers.DetailedHelp{ - Summary: "marks this version as the \"storage version\" for the CRD for conversion. ", - Details: "When conversion is enabled for a CRD (i.e. it's not a trivial-versions/single-version CRD), one version is set as the \"storage version\" to be stored in etcd. Attempting to store any other version will result in conversion to the storage version via a conversion webhook.", + Summary: "marks this version as the \"storage version\" for the CRD for conversion.", + Details: "When conversion is enabled for a CRD (i.e. it's not a trivial-versions/single-version CRD),\none version is set as the \"storage version\" to be stored in etcd. Attempting to store any\nother version will result in conversion to the storage version via a conversion webhook.", }, FieldHelp: map[string]markers.DetailedHelp{}, } @@ -395,8 +395,8 @@ func (StructType) Help() *markers.DefinitionHelp { return &markers.DefinitionHelp{ Category: "CRD processing", DetailedHelp: markers.DetailedHelp{ - Summary: "specifies the level of atomicity of the struct; i.e. whether each field in the struct is independent of the others, or all fields are treated as a single unit. ", - Details: "Possible values: \n - \"granular\": fields in the struct are independent of each other, and can be manipulated by different actors. This is the default behavior. \n - \"atomic\": all fields are treated as one unit. Any changes have to replace the entire struct.", + Summary: "specifies the level of atomicity of the struct;", + Details: "i.e. 
whether each field in the struct is independent of the others,\nor all fields are treated as a single unit.\n\n\nPossible values:\n\n\n - \"granular\": fields in the struct are independent of each other,\n and can be manipulated by different actors.\n This is the default behavior.\n\n\n - \"atomic\": all fields are treated as one unit.\n Any changes have to replace the entire struct.", }, FieldHelp: map[string]markers.DetailedHelp{}, } @@ -419,8 +419,8 @@ func (SubresourceScale) Help() *markers.DefinitionHelp { Details: "", }, "SelectorPath": { - Summary: "specifies the jsonpath to the pod label selector field for the scale's status. ", - Details: "The selector field must be the *string* form (serialized form) of a selector. Setting a pod label selector is necessary for your type to work with the HorizontalPodAutoscaler.", + Summary: "specifies the jsonpath to the pod label selector field for the scale's status.", + Details: "The selector field must be the *string* form (serialized form) of a selector.\nSetting a pod label selector is necessary for your type to work with the HorizontalPodAutoscaler.", }, }, } @@ -441,8 +441,8 @@ func (Type) Help() *markers.DefinitionHelp { return &markers.DefinitionHelp{ Category: "CRD validation", DetailedHelp: markers.DetailedHelp{ - Summary: "overrides the type for this field (which defaults to the equivalent of the Go type). ", - Details: "This generally must be paired with custom serialization. For example, the metav1.Time field would be marked as \"type: string\" and \"format: date-time\".", + Summary: "overrides the type for this field (which defaults to the equivalent of the Go type).", + Details: "This generally must be paired with custom serialization. For example, the\nmetav1.Time field would be marked as \"type: string\" and \"format: date-time\".", }, FieldHelp: map[string]markers.DetailedHelp{}, } @@ -463,7 +463,7 @@ func (UnservedVersion) Help() *markers.DefinitionHelp { return &markers.DefinitionHelp{ Category: "CRD", DetailedHelp: markers.DetailedHelp{ - Summary: "does not serve this version. ", + Summary: "does not serve this version.", Details: "This is useful if you need to drop support for a version in favor of a newer version.", }, FieldHelp: map[string]markers.DetailedHelp{}, @@ -474,8 +474,8 @@ func (XEmbeddedResource) Help() *markers.DefinitionHelp { return &markers.DefinitionHelp{ Category: "CRD validation", DetailedHelp: markers.DetailedHelp{ - Summary: "EmbeddedResource marks a fields as an embedded resource with apiVersion, kind and metadata fields. ", - Details: "An embedded resource is a value that has apiVersion, kind and metadata fields. They are validated implicitly according to the semantics of the currently running apiserver. It is not necessary to add any additional schema for these field, yet it is possible. This can be combined with PreserveUnknownFields.", + Summary: "EmbeddedResource marks a fields as an embedded resource with apiVersion, kind and metadata fields.", + Details: "An embedded resource is a value that has apiVersion, kind and metadata fields.\nThey are validated implicitly according to the semantics of the currently\nrunning apiserver. It is not necessary to add any additional schema for these\nfield, yet it is possible. 
This can be combined with PreserveUnknownFields.", }, FieldHelp: map[string]markers.DetailedHelp{}, } @@ -485,8 +485,8 @@ func (XIntOrString) Help() *markers.DefinitionHelp { return &markers.DefinitionHelp{ Category: "CRD validation", DetailedHelp: markers.DetailedHelp{ - Summary: "IntOrString marks a fields as an IntOrString. ", - Details: "This is required when applying patterns or other validations to an IntOrString field. Knwon information about the type is applied during the collapse phase and as such is not normally available during marker application.", + Summary: "IntOrString marks a fields as an IntOrString.", + Details: "This is required when applying patterns or other validations to an IntOrString\nfield. Knwon information about the type is applied during the collapse phase\nand as such is not normally available during marker application.", }, FieldHelp: map[string]markers.DetailedHelp{}, } @@ -496,8 +496,8 @@ func (XPreserveUnknownFields) Help() *markers.DefinitionHelp { return &markers.DefinitionHelp{ Category: "CRD processing", DetailedHelp: markers.DetailedHelp{ - Summary: "PreserveUnknownFields stops the apiserver from pruning fields which are not specified. ", - Details: "By default the apiserver drops unknown fields from the request payload during the decoding step. This marker stops the API server from doing so. It affects fields recursively, but switches back to normal pruning behaviour if nested properties or additionalProperties are specified in the schema. This can either be true or undefined. False is forbidden. \n NB: The kubebuilder:validation:XPreserveUnknownFields variant is deprecated in favor of the kubebuilder:pruning:PreserveUnknownFields variant. They function identically.", + Summary: "PreserveUnknownFields stops the apiserver from pruning fields which are not specified.", + Details: "By default the apiserver drops unknown fields from the request payload\nduring the decoding step. This marker stops the API server from doing so.\nIt affects fields recursively, but switches back to normal pruning behaviour\nif nested properties or additionalProperties are specified in the schema.\nThis can either be true or undefined. False\nis forbidden.\n\n\nNB: The kubebuilder:validation:XPreserveUnknownFields variant is deprecated\nin favor of the kubebuilder:pruning:PreserveUnknownFields variant. They function\nidentically.", }, FieldHelp: map[string]markers.DetailedHelp{}, } @@ -507,8 +507,8 @@ func (XValidation) Help() *markers.DefinitionHelp { return &markers.DefinitionHelp{ Category: "CRD validation", DetailedHelp: markers.DetailedHelp{ - Summary: "marks a field as requiring a value for which a given expression evaluates to true. 
", - Details: "This marker may be repeated to specify multiple expressions, all of which must evaluate to true.", + Summary: "marks a field as requiring a value for which a given", + Details: "expression evaluates to true.\n\n\nThis marker may be repeated to specify multiple expressions, all of\nwhich must evaluate to true.", }, FieldHelp: map[string]markers.DetailedHelp{ "Rule": { @@ -519,6 +519,10 @@ func (XValidation) Help() *markers.DefinitionHelp { Summary: "", Details: "", }, + "MessageExpression": { + Summary: "", + Details: "", + }, }, } } diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/schema.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/schema.go index e76d3ea88c8..a6e3cd60124 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/crd/schema.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/schema.go @@ -22,6 +22,7 @@ import ( "go/ast" "go/token" "go/types" + "sort" "strings" apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" @@ -124,39 +125,64 @@ func infoToSchema(ctx *schemaContext) *apiext.JSONSchemaProps { return typeToSchema(ctx, ctx.info.RawSpec.Type) } -// applyMarkers applies schema markers to the given schema, respecting "apply first" markers. +// applyMarkers applies schema markers given their priority to the given schema func applyMarkers(ctx *schemaContext, markerSet markers.MarkerValues, props *apiext.JSONSchemaProps, node ast.Node) { - // apply "apply first" markers first... - for _, markerValues := range markerSet { + markers := make([]SchemaMarker, 0, len(markerSet)) + itemsMarkers := make([]SchemaMarker, 0, len(markerSet)) + itemsMarkerNames := make(map[SchemaMarker]string) + + for markerName, markerValues := range markerSet { for _, markerValue := range markerValues { - if _, isApplyFirst := markerValue.(applyFirstMarker); !isApplyFirst { - continue + if schemaMarker, isSchemaMarker := markerValue.(SchemaMarker); isSchemaMarker { + if strings.HasPrefix(markerName, crdmarkers.ValidationItemsPrefix) { + itemsMarkers = append(itemsMarkers, schemaMarker) + itemsMarkerNames[schemaMarker] = markerName + } else { + markers = append(markers, schemaMarker) + } } + } + } - schemaMarker, isSchemaMarker := markerValue.(SchemaMarker) - if !isSchemaMarker { - continue - } + cmpPriority := func(markers []SchemaMarker, i, j int) bool { + var iPriority, jPriority crdmarkers.ApplyPriority - if err := schemaMarker.ApplyToSchema(props); err != nil { - ctx.pkg.AddError(loader.ErrFromNode(err /* an okay guess */, node)) - } + switch m := markers[i].(type) { + case crdmarkers.ApplyPriorityMarker: + iPriority = m.ApplyPriority() + case applyFirstMarker: + iPriority = crdmarkers.ApplyPriorityFirst + default: + iPriority = crdmarkers.ApplyPriorityDefault } + + switch m := markers[j].(type) { + case crdmarkers.ApplyPriorityMarker: + jPriority = m.ApplyPriority() + case applyFirstMarker: + jPriority = crdmarkers.ApplyPriorityFirst + default: + jPriority = crdmarkers.ApplyPriorityDefault + } + + return iPriority < jPriority } + sort.Slice(markers, func(i, j int) bool { return cmpPriority(markers, i, j) }) + sort.Slice(itemsMarkers, func(i, j int) bool { return cmpPriority(itemsMarkers, i, j) }) - // ...then the rest of the markers - for _, markerValues := range markerSet { - for _, markerValue := range markerValues { - if _, isApplyFirst := markerValue.(applyFirstMarker); isApplyFirst { - // skip apply-first markers, which were already applied - continue - } + for _, schemaMarker := range markers { + if err := schemaMarker.ApplyToSchema(props); err != nil { + 
ctx.pkg.AddError(loader.ErrFromNode(err /* an okay guess */, node)) + } + } - schemaMarker, isSchemaMarker := markerValue.(SchemaMarker) - if !isSchemaMarker { - continue - } - if err := schemaMarker.ApplyToSchema(props); err != nil { + for _, schemaMarker := range itemsMarkers { + if props.Type != "array" || props.Items == nil || props.Items.Schema == nil { + err := fmt.Errorf("must apply %s to an array value, found %s", itemsMarkerNames[schemaMarker], props.Type) + ctx.pkg.AddError(loader.ErrFromNode(err, node)) + } else { + itemsSchema := props.Items.Schema + if err := schemaMarker.ApplyToSchema(itemsSchema); err != nil { ctx.pkg.AddError(loader.ErrFromNode(err /* an okay guess */, node)) } } @@ -453,7 +479,7 @@ func builtinToType(basic *types.Basic, allowDangerousTypes bool) (typ string, fo // Open coded go/types representation of encoding/json.Marshaller var jsonMarshaler = types.NewInterfaceType([]*types.Func{ types.NewFunc(token.NoPos, nil, "MarshalJSON", - types.NewSignature(nil, nil, + types.NewSignatureType(nil, nil, nil, nil, types.NewTuple( types.NewVar(token.NoPos, nil, "", types.NewSlice(types.Universe.Lookup("byte").Type())), types.NewVar(token.NoPos, nil, "", types.Universe.Lookup("error").Type())), false)), diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/zz_generated.markerhelp.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/zz_generated.markerhelp.go index 68b7619ad3f..c1aa3853f77 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/crd/zz_generated.markerhelp.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/zz_generated.markerhelp.go @@ -33,20 +33,20 @@ func (Generator) Help() *markers.DefinitionHelp { }, FieldHelp: map[string]markers.DetailedHelp{ "IgnoreUnexportedFields": { - Summary: "indicates that we should skip unexported fields. ", + Summary: "indicates that we should skip unexported fields.", Details: "Left unspecified, the default is false.", }, "AllowDangerousTypes": { - Summary: "allows types which are usually omitted from CRD generation because they are not recommended. ", - Details: "Currently the following additional types are allowed when this is true: float32 float64 \n Left unspecified, the default is false", + Summary: "allows types which are usually omitted from CRD generation", + Details: "because they are not recommended.\n\n\nCurrently the following additional types are allowed when this is true:\nfloat32\nfloat64\n\n\nLeft unspecified, the default is false", }, "MaxDescLen": { - Summary: "specifies the maximum description length for fields in CRD's OpenAPI schema. ", - Details: "0 indicates drop the description for all fields completely. n indicates limit the description to at most n characters and truncate the description to closest sentence boundary if it exceeds n characters.", + Summary: "specifies the maximum description length for fields in CRD's OpenAPI schema.", + Details: "0 indicates drop the description for all fields completely.\nn indicates limit the description to at most n characters and truncate the description to\nclosest sentence boundary if it exceeds n characters.", }, "CRDVersions": { - Summary: "specifies the target API versions of the CRD type itself to generate. Defaults to v1. ", - Details: "Currently, the only supported value is v1. \n The first version listed will be assumed to be the \"default\" version and will not get a version suffix in the output filename. 
\n You'll need to use \"v1\" to get support for features like defaulting, along with an API server that supports it (Kubernetes 1.16+).", + Summary: "specifies the target API versions of the CRD type itself to", + Details: "generate. Defaults to v1.\n\n\nCurrently, the only supported value is v1.\n\n\nThe first version listed will be assumed to be the \"default\" version and\nwill not get a version suffix in the output filename.\n\n\nYou'll need to use \"v1\" to get support for features like defaulting,\nalong with an API server that supports it (Kubernetes 1.16+).", }, "GenerateEmbeddedObjectMeta": { Summary: "specifies if any embedded ObjectMeta in the CRD should be generated", @@ -60,6 +60,10 @@ func (Generator) Help() *markers.DefinitionHelp { Summary: "specifies the year to substitute for \" YEAR\" in the header file.", Details: "", }, + "DeprecatedV1beta1CompatibilityPreserveUnknownFields": { + Summary: "indicates whether", + Details: "or not we should turn off field pruning for this resource.\n\n\nSpecifies spec.preserveUnknownFields value that is false and omitted by default.\nThis value can only be specified for CustomResourceDefinitions that were created with\n`apiextensions.k8s.io/v1beta1`.\n\n\nThe field can be set for compatiblity reasons, although strongly discouraged, resource\nauthors should move to a structural OpenAPI schema instead.\n\n\nSee https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#field-pruning\nfor more information about field pruning and v1beta1 resources compatibility.", + }, }, } } diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/gen.go b/vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/gen.go index c1d3b708c73..a85cc8577b9 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/gen.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/gen.go @@ -191,7 +191,6 @@ import ( if err != nil { pkg.AddError(err) } - } // generateForPackage generates DeepCopy and runtime.Object implementations for diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/zz_generated.markerhelp.go b/vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/zz_generated.markerhelp.go index 55df3bfabd6..081c0655113 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/zz_generated.markerhelp.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/zz_generated.markerhelp.go @@ -28,8 +28,8 @@ func (Generator) Help() *markers.DefinitionHelp { return &markers.DefinitionHelp{ Category: "", DetailedHelp: markers.DetailedHelp{ - Summary: "generates code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.", - Details: "", + Summary: "generates code containing DeepCopy, DeepCopyInto, and", + Details: "DeepCopyObject method implementations.", }, FieldHelp: map[string]markers.DetailedHelp{ "HeaderFile": { diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/genall/options.go b/vendor/sigs.k8s.io/controller-tools/pkg/genall/options.go index 658b96bb35b..97d7d67a64d 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/genall/options.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/genall/options.go @@ -74,7 +74,6 @@ func RegistryFromOptions(optionsRegistry *markers.Registry, options []string) (* // further modified. Not default generators are used if none are specified -- you can check // the output and rerun for that. 
func FromOptions(optionsRegistry *markers.Registry, options []string) (*Runtime, error) { - protoRt, err := protoFromOptions(optionsRegistry, options) if err != nil { return nil, err diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/genall/zz_generated.markerhelp.go b/vendor/sigs.k8s.io/controller-tools/pkg/genall/zz_generated.markerhelp.go index 6f58013b225..998061b9bbe 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/genall/zz_generated.markerhelp.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/genall/zz_generated.markerhelp.go @@ -28,7 +28,7 @@ func (InputPaths) Help() *markers.DefinitionHelp { return &markers.DefinitionHelp{ Category: "", DetailedHelp: markers.DetailedHelp{ - Summary: "represents paths and go-style path patterns to use as package roots. ", + Summary: "represents paths and go-style path patterns to use as package roots.", Details: "Multiple paths can be specified using \"{path1, path2, path3}\".", }, FieldHelp: map[string]markers.DetailedHelp{}, @@ -39,8 +39,8 @@ func (OutputArtifacts) Help() *markers.DefinitionHelp { return &markers.DefinitionHelp{ Category: "", DetailedHelp: markers.DetailedHelp{ - Summary: "outputs artifacts to different locations, depending on whether they're package-associated or not. ", - Details: "Non-package associated artifacts are output to the Config directory, while package-associated ones are output to their package's source files' directory, unless an alternate path is specified in Code.", + Summary: "outputs artifacts to different locations, depending on", + Details: "whether they're package-associated or not.\n\n\nNon-package associated artifacts\nare output to the Config directory, while package-associated ones are output\nto their package's source files' directory, unless an alternate path is\nspecified in Code.", }, FieldHelp: map[string]markers.DetailedHelp{ "Config": { @@ -59,8 +59,8 @@ func (OutputToDirectory) Help() *markers.DefinitionHelp { return &markers.DefinitionHelp{ Category: "", DetailedHelp: markers.DetailedHelp{ - Summary: "outputs each artifact to the given directory, regardless of if it's package-associated or not.", - Details: "", + Summary: "outputs each artifact to the given directory, regardless", + Details: "of if it's package-associated or not.", }, FieldHelp: map[string]markers.DetailedHelp{}, } @@ -81,7 +81,7 @@ func (outputToStdout) Help() *markers.DefinitionHelp { return &markers.DefinitionHelp{ Category: "", DetailedHelp: markers.DetailedHelp{ - Summary: "outputs everything to standard-out, with no separation. 
", + Summary: "outputs everything to standard-out, with no separation.", Details: "Generally useful for single-artifact outputs.", }, FieldHelp: map[string]markers.DetailedHelp{}, diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/loader/loader.go b/vendor/sigs.k8s.io/controller-tools/pkg/loader/loader.go index 7762e53e733..2c5cf4c97fb 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/loader/loader.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/loader/loader.go @@ -370,7 +370,7 @@ func LoadRootsWithConfig(cfg *packages.Config, roots ...string) ([]*Package, err cfg: cfg, packages: make(map[*packages.Package]*Package), } - l.cfg.Mode |= packages.LoadImports | packages.NeedTypesSizes + l.cfg.Mode |= packages.NeedName | packages.NeedFiles | packages.NeedCompiledGoFiles | packages.NeedImports | packages.NeedTypesSizes if l.cfg.Fset == nil { l.cfg.Fset = token.NewFileSet() } @@ -393,7 +393,7 @@ func LoadRootsWithConfig(cfg *packages.Config, roots ...string) ([]*Package, err // and try and prevent packages from showing up twice when nested module // support is enabled. there is not harm that comes from this per se, but // it makes testing easier when a known number of modules can be asserted - uniquePkgIDs := sets.String{} + uniquePkgIDs := sets.Set[string]{} // loadPackages returns the Go packages for the provided roots // @@ -489,7 +489,6 @@ func LoadRootsWithConfig(cfg *packages.Config, roots ...string) ([]*Package, err p string, d os.DirEntry, e error) error { - if e != nil { return e } @@ -518,7 +517,6 @@ func LoadRootsWithConfig(cfg *packages.Config, roots ...string) ([]*Package, err // get the absolute path of the root if !filepath.IsAbs(r) { - // if the initial value of cfg.Dir was non-empty then use it when // building the absolute path to this root. otherwise use the // filepath.Abs function to get the absolute path of the root based @@ -548,7 +546,6 @@ func LoadRootsWithConfig(cfg *packages.Config, roots ...string) ([]*Package, err if err := filepath.WalkDir( d, addNestedGoModulesToRoots); err != nil { - return nil, err } } @@ -604,9 +601,9 @@ func LoadRootsWithConfig(cfg *packages.Config, roots ...string) ([]*Package, err // references with those from the rootPkgs list. This ensures the // kubebuilder marker generation is handled correctly. For more info, // please see issue 680. 
-func visitImports(rootPkgs []*Package, pkg *Package, seen sets.String) { +func visitImports(rootPkgs []*Package, pkg *Package, seen sets.Set[string]) { if seen == nil { - seen = sets.String{} + seen = sets.Set[string]{} } for importedPkgID, importedPkg := range pkg.Imports() { for i := range rootPkgs { diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/loader/refs.go b/vendor/sigs.k8s.io/controller-tools/pkg/loader/refs.go index 37c3295f140..f58a29f7288 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/loader/refs.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/loader/refs.go @@ -18,7 +18,6 @@ package loader import ( "fmt" - "go/ast" "strconv" "sync" @@ -164,7 +163,7 @@ func allReferencedPackages(pkg *Package, filterNodes NodeFilter) []*Package { refsByFile[file] = refs } - EachType(pkg, func(file *ast.File, decl *ast.GenDecl, spec *ast.TypeSpec) { + EachType(pkg, func(file *ast.File, _ *ast.GenDecl, spec *ast.TypeSpec) { refs := refsByFile[file] refs.collectReferences(spec.Type, filterNodes) }) diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/markers/collect.go b/vendor/sigs.k8s.io/controller-tools/pkg/markers/collect.go index 63aa7344b2f..23f52e3d275 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/markers/collect.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/markers/collect.go @@ -388,7 +388,6 @@ func (v markerSubVisitor) Visit(node ast.Node) ast.Visitor { v.commentInd = lastCommentInd + 1 return resVisitor - } // associatedCommentsFor returns the doc comment group (if relevant and present) and end-of-line comment diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/rbac/parser.go b/vendor/sigs.k8s.io/controller-tools/pkg/rbac/parser.go index c8bbe9d5463..50bdf322c71 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/rbac/parser.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/rbac/parser.go @@ -210,7 +210,6 @@ func GenerateRoles(ctx *genall.GenerationContext, roleName string) ([]interface{ var policyRules []rbacv1.PolicyRule for _, key := range keys { policyRules = append(policyRules, ruleMap[key].ToRule()) - } return policyRules } diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/rbac/zz_generated.markerhelp.go b/vendor/sigs.k8s.io/controller-tools/pkg/rbac/zz_generated.markerhelp.go index 5a83941c38e..20c707b1d13 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/rbac/zz_generated.markerhelp.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/rbac/zz_generated.markerhelp.go @@ -65,8 +65,8 @@ func (Rule) Help() *markers.DefinitionHelp { Details: "", }, "ResourceNames": { - Summary: "specifies the names of the API resources that this rule encompasses. ", - Details: "Create requests cannot be restricted by resourcename, as the object's name is not known at authorization time.", + Summary: "specifies the names of the API resources that this rule encompasses.", + Details: "Create requests cannot be restricted by resourcename, as the object's name\nis not known at authorization time.", }, "Verbs": { Summary: "specifies the (lowercase) kubernetes API verbs that this rule encompasses.", @@ -77,8 +77,8 @@ func (Rule) Help() *markers.DefinitionHelp { Details: "", }, "Namespace": { - Summary: "specifies the scope of the Rule. If not set, the Rule belongs to the generated ClusterRole. 
If set, the Rule belongs to a Role, whose namespace is specified by this field.", - Details: "", + Summary: "specifies the scope of the Rule.", + Details: "If not set, the Rule belongs to the generated ClusterRole.\nIf set, the Rule belongs to a Role, whose namespace is specified by this field.", }, }, } diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/schemapatcher/gen.go b/vendor/sigs.k8s.io/controller-tools/pkg/schemapatcher/gen.go index 8080aeae720..9e5a8513197 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/schemapatcher/gen.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/schemapatcher/gen.go @@ -92,7 +92,7 @@ func (g Generator) Generate(ctx *genall.GenerationContext) (result error) { Collector: ctx.Collector, Checker: ctx.Checker, // Indicates the parser on whether to register the ObjectMeta type or not - GenerateEmbeddedObjectMeta: g.GenerateEmbeddedObjectMeta != nil && *g.GenerateEmbeddedObjectMeta == true, + GenerateEmbeddedObjectMeta: g.GenerateEmbeddedObjectMeta != nil && *g.GenerateEmbeddedObjectMeta, } crdgen.AddKnownTypes(parser) diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/schemapatcher/zz_generated.markerhelp.go b/vendor/sigs.k8s.io/controller-tools/pkg/schemapatcher/zz_generated.markerhelp.go index 6e1d5a18c03..db4cdb0749d 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/schemapatcher/zz_generated.markerhelp.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/schemapatcher/zz_generated.markerhelp.go @@ -28,8 +28,8 @@ func (Generator) Help() *markers.DefinitionHelp { return &markers.DefinitionHelp{ Category: "", DetailedHelp: markers.DetailedHelp{ - Summary: "patches existing CRDs with new schemata. ", - Details: "It will generate output for each \"CRD Version\" (API version of the CRD type itself) , e.g. apiextensions/v1) available.", + Summary: "patches existing CRDs with new schemata.", + Details: "It will generate output for each \"CRD Version\" (API version of the CRD type\nitself) , e.g. apiextensions/v1) available.", }, FieldHelp: map[string]markers.DetailedHelp{ "ManifestsPath": { @@ -37,8 +37,8 @@ func (Generator) Help() *markers.DefinitionHelp { Details: "", }, "MaxDescLen": { - Summary: "specifies the maximum description length for fields in CRD's OpenAPI schema. ", - Details: "0 indicates drop the description for all fields completely. n indicates limit the description to at most n characters and truncate the description to closest sentence boundary if it exceeds n characters.", + Summary: "specifies the maximum description length for fields in CRD's OpenAPI schema.", + Details: "0 indicates drop the description for all fields completely.\nn indicates limit the description to at most n characters and truncate the description to\nclosest sentence boundary if it exceeds n characters.", }, "GenerateEmbeddedObjectMeta": { Summary: "specifies if any embedded ObjectMeta in the CRD should be generated", diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/version/version.go b/vendor/sigs.k8s.io/controller-tools/pkg/version/version.go index 09c8efcf402..f2709df4a71 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/version/version.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/version/version.go @@ -13,6 +13,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +// Package version provides the version of the main module. 
package version import ( diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/webhook/parser.go b/vendor/sigs.k8s.io/controller-tools/pkg/webhook/parser.go index 26d07292036..5652b4c45da 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/webhook/parser.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/webhook/parser.go @@ -289,7 +289,6 @@ func (c Config) clientConfig() (admissionregv1.WebhookClientConfig, error) { return admissionregv1.WebhookClientConfig{ URL: &url, }, nil - } // sideEffects returns the sideEffects config for a webhook. @@ -486,7 +485,7 @@ func (g Generator) Generate(ctx *genall.GenerationContext) error { for k, v := range versionedWebhooks { var fileName string if k == defaultWebhookVersion { - fileName = fmt.Sprintf("manifests.yaml") + fileName = "manifests.yaml" } else { fileName = fmt.Sprintf("manifests.%s.yaml", k) } diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/webhook/zz_generated.markerhelp.go b/vendor/sigs.k8s.io/controller-tools/pkg/webhook/zz_generated.markerhelp.go index 4dae8604675..d40bcc81f6b 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/webhook/zz_generated.markerhelp.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/webhook/zz_generated.markerhelp.go @@ -28,29 +28,29 @@ func (Config) Help() *markers.DefinitionHelp { return &markers.DefinitionHelp{ Category: "Webhook", DetailedHelp: markers.DetailedHelp{ - Summary: "specifies how a webhook should be served. ", - Details: "It specifies only the details that are intrinsic to the application serving it (e.g. the resources it can handle, or the path it serves on).", + Summary: "specifies how a webhook should be served.", + Details: "It specifies only the details that are intrinsic to the application serving\nit (e.g. the resources it can handle, or the path it serves on).", }, FieldHelp: map[string]markers.DetailedHelp{ "Mutating": { - Summary: "marks this as a mutating webhook (it's validating only if false) ", - Details: "Mutating webhooks are allowed to change the object in their response, and are called *before* all validating webhooks. Mutating webhooks may choose to reject an object, similarly to a validating webhook.", + Summary: "marks this as a mutating webhook (it's validating only if false)", + Details: "Mutating webhooks are allowed to change the object in their response,\nand are called *before* all validating webhooks. Mutating webhooks may\nchoose to reject an object, similarly to a validating webhook.", }, "FailurePolicy": { - Summary: "specifies what should happen if the API server cannot reach the webhook. ", - Details: "It may be either \"ignore\" (to skip the webhook and continue on) or \"fail\" (to reject the object in question).", + Summary: "specifies what should happen if the API server cannot reach the webhook.", + Details: "It may be either \"ignore\" (to skip the webhook and continue on) or \"fail\" (to reject\nthe object in question).", }, "MatchPolicy": { - Summary: "defines how the \"rules\" list is used to match incoming requests. 
Allowed values are \"Exact\" (match only if it exactly matches the specified rule) or \"Equivalent\" (match a request if it modifies a resource listed in rules, even via another API group or version).", - Details: "", + Summary: "defines how the \"rules\" list is used to match incoming requests.", + Details: "Allowed values are \"Exact\" (match only if it exactly matches the specified rule)\nor \"Equivalent\" (match a request if it modifies a resource listed in rules, even via another API group or version).", }, "SideEffects": { - Summary: "specify whether calling the webhook will have side effects. This has an impact on dry runs and `kubectl diff`: if the sideEffect is \"Unknown\" (the default) or \"Some\", then the API server will not call the webhook on a dry-run request and fails instead. If the value is \"None\", then the webhook has no side effects and the API server will call it on dry-run. If the value is \"NoneOnDryRun\", then the webhook is responsible for inspecting the \"dryRun\" property of the AdmissionReview sent in the request, and avoiding side effects if that value is \"true.\"", - Details: "", + Summary: "specify whether calling the webhook will have side effects.", + Details: "This has an impact on dry runs and `kubectl diff`: if the sideEffect is \"Unknown\" (the default) or \"Some\", then\nthe API server will not call the webhook on a dry-run request and fails instead.\nIf the value is \"None\", then the webhook has no side effects and the API server will call it on dry-run.\nIf the value is \"NoneOnDryRun\", then the webhook is responsible for inspecting the \"dryRun\" property of the\nAdmissionReview sent in the request, and avoiding side effects if that value is \"true.\"", }, "TimeoutSeconds": { - Summary: "allows configuring how long the API server should wait for a webhook to respond before treating the call as a failure. If the timeout expires before the webhook responds, the webhook call will be ignored or the API call will be rejected based on the failure policy. The timeout value must be between 1 and 30 seconds. The timeout for an admission webhook defaults to 10 seconds.", - Details: "", + Summary: "allows configuring how long the API server should wait for a webhook to respond before treating the call as a failure.", + Details: "If the timeout expires before the webhook responds, the webhook call will be ignored or the API call will be rejected based on the failure policy.\nThe timeout value must be between 1 and 30 seconds.\nThe timeout for an admission webhook defaults to 10 seconds.", }, "Groups": { Summary: "specifies the API groups that this webhook receives requests for.", @@ -61,8 +61,8 @@ func (Config) Help() *markers.DefinitionHelp { Details: "", }, "Verbs": { - Summary: "specifies the Kubernetes API verbs that this webhook receives requests for. ", - Details: "Only modification-like verbs may be specified. May be \"create\", \"update\", \"delete\", \"connect\", or \"*\" (for all).", + Summary: "specifies the Kubernetes API verbs that this webhook receives requests for.", + Details: "Only modification-like verbs may be specified.\nMay be \"create\", \"update\", \"delete\", \"connect\", or \"*\" (for all).", }, "Versions": { Summary: "specifies the API versions that this webhook receives requests for.", @@ -73,20 +73,24 @@ func (Config) Help() *markers.DefinitionHelp { Details: "", }, "Path": { - Summary: "specifies that path that the API server should connect to this webhook on. 
Must be prefixed with a '/validate-' or '/mutate-' depending on the type, and followed by $GROUP-$VERSION-$KIND where all values are lower-cased and the periods in the group are substituted for hyphens. For example, a validating webhook path for type batch.tutorial.kubebuilder.io/v1,Kind=CronJob would be /validate-batch-tutorial-kubebuilder-io-v1-cronjob", - Details: "", + Summary: "specifies that path that the API server should connect to this webhook on. Must be", + Details: "prefixed with a '/validate-' or '/mutate-' depending on the type, and followed by\n$GROUP-$VERSION-$KIND where all values are lower-cased and the periods in the group\nare substituted for hyphens. For example, a validating webhook path for type\nbatch.tutorial.kubebuilder.io/v1,Kind=CronJob would be\n/validate-batch-tutorial-kubebuilder-io-v1-cronjob", }, "WebhookVersions": { - Summary: "specifies the target API versions of the {Mutating,Validating}WebhookConfiguration objects itself to generate. The only supported value is v1. Defaults to v1.", - Details: "", + Summary: "specifies the target API versions of the {Mutating,Validating}WebhookConfiguration objects", + Details: "itself to generate. The only supported value is v1. Defaults to v1.", }, "AdmissionReviewVersions": { - Summary: "is an ordered list of preferred `AdmissionReview` versions the Webhook expects.", - Details: "", + Summary: "is an ordered list of preferred `AdmissionReview`", + Details: "versions the Webhook expects.", }, "ReinvocationPolicy": { - Summary: "allows mutating webhooks to request reinvocation after other mutations ", - Details: "To allow mutating admission plugins to observe changes made by other plugins, built-in mutating admission plugins are re-run if a mutating webhook modifies an object, and mutating webhooks can specify a reinvocationPolicy to control whether they are reinvoked as well.", + Summary: "allows mutating webhooks to request reinvocation after other mutations", + Details: "To allow mutating admission plugins to observe changes made by other plugins,\nbuilt-in mutating admission plugins are re-run if a mutating webhook modifies\nan object, and mutating webhooks can specify a reinvocationPolicy to control\nwhether they are reinvoked as well.", + }, + "URL": { + Summary: "allows mutating webhooks configuration to specify an external URL when generating", + Details: "the manifests, instead of using the internal service communication. 
Should be in format of\nhttps://address:port/path\nWhen this option is specified, the serviceConfig.Service is removed from webhook the manifest.\nThe URL configuration should be between quotes.\n`url` cannot be specified when `path` is specified.", }, }, } diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/loadconfigfromcrds.go b/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/loadconfigfromcrds.go index afe6da2e747..f60c517b766 100644 --- a/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/loadconfigfromcrds.go +++ b/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/loadconfigfromcrds.go @@ -144,7 +144,7 @@ func loadCrdIntoConfig( } _, label := property.Extensions.GetString(xLabelSelector) if label { - err = theConfig.AddLabelFieldSpec( + err = theConfig.AddCommonLabelsFieldSpec( makeFs(theGvk, append(path, propName))) if err != nil { return diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/builtins/SortOrderTransformer.go b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/SortOrderTransformer.go index 687f21a6087..c50f627fee8 100644 --- a/vendor/sigs.k8s.io/kustomize/api/internal/builtins/SortOrderTransformer.go +++ b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/SortOrderTransformer.go @@ -74,34 +74,16 @@ func (p *SortOrderTransformerPlugin) Transform(m resmap.ResMap) (err error) { // Sort if p.SortOptions.Order == types.LegacySortOrder { - s := newLegacyIDSorter(m.AllIds(), p.SortOptions.LegacySortOptions) + s := newLegacyIDSorter(m.Resources(), p.SortOptions.LegacySortOptions) sort.Sort(s) - err = applyOrdering(m, s.resids) - if err != nil { - return err - } - } - return nil -} -// applyOrdering takes resources (given in ResMap) and a desired ordering given -// as a sequence of ResIds, and updates the ResMap's resources to match the -// ordering. -func applyOrdering(m resmap.ResMap, ordering []resid.ResId) error { - var err error - resources := make([]*resource.Resource, m.Size()) - // Clear and refill with the correct order - for i, id := range ordering { - resources[i], err = m.GetByCurrentId(id) - if err != nil { - return errors.WrapPrefixf(err, "expected match for sorting") - } - } - m.Clear() - for _, r := range resources { - err = m.Append(r) - if err != nil { - return errors.WrapPrefixf(err, "SortOrderTransformer: Failed to append to resources") + // Clear the map and re-add the resources in the sorted order. + m.Clear() + for _, r := range s.resources { + err := m.Append(r) + if err != nil { + return errors.WrapPrefixf(err, "SortOrderTransformer: Failed to append to resources") + } } } return nil @@ -117,12 +99,17 @@ func applyOrdering(m resmap.ResMap, ordering []resid.ResId) error { type legacyIDSorter struct { // resids only stores the metadata of the object. This is an optimization as // it's expensive to compute these again and again during ordering. - resids []resid.ResId + resids []resid.ResId + // Initially, we sorted the metadata (ResId) of each object and then called GetByCurrentId on each to construct the final list. + // The problem is that GetByCurrentId is inefficient and does a linear scan in a list every time we do that. + // So instead, we sort resources alongside the ResIds. + resources []*resource.Resource + typeOrders map[string]int } func newLegacyIDSorter( - resids []resid.ResId, + resources []*resource.Resource, options *types.LegacySortOptions) *legacyIDSorter { // Precalculate a resource ranking based on the priority lists. 
var typeOrders = func() map[string]int { @@ -135,10 +122,13 @@ func newLegacyIDSorter( } return m }() - return &legacyIDSorter{ - resids: resids, - typeOrders: typeOrders, + + ret := &legacyIDSorter{typeOrders: typeOrders} + for _, res := range resources { + ret.resids = append(ret.resids, res.CurId()) + ret.resources = append(ret.resources, res) } + return ret } var _ sort.Interface = legacyIDSorter{} @@ -146,6 +136,7 @@ var _ sort.Interface = legacyIDSorter{} func (a legacyIDSorter) Len() int { return len(a.resids) } func (a legacyIDSorter) Swap(i, j int) { a.resids[i], a.resids[j] = a.resids[j], a.resids[i] + a.resources[i], a.resources[j] = a.resources[j], a.resources[i] } func (a legacyIDSorter) Less(i, j int) bool { if !a.resids[i].Gvk.Equals(a.resids[j].Gvk) { diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/konfig/builtinpluginconsts/namereference.go b/vendor/sigs.k8s.io/kustomize/api/internal/konfig/builtinpluginconsts/namereference.go index 658b0c34b91..383dcf83ea4 100644 --- a/vendor/sigs.k8s.io/kustomize/api/internal/konfig/builtinpluginconsts/namereference.go +++ b/vendor/sigs.k8s.io/kustomize/api/internal/konfig/builtinpluginconsts/namereference.go @@ -421,6 +421,13 @@ nameReference: fieldSpecs: - path: spec/ingressClassName kind: Ingress + +- kind: ValidatingAdmissionPolicy + group: admissionregistration.k8s.io + fieldSpecs: + - path: spec/policyName + kind: ValidatingAdmissionPolicyBinding + group: admissionregistration.k8s.io ` ) diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/localizer/localizer.go b/vendor/sigs.k8s.io/kustomize/api/internal/localizer/localizer.go index 71de9dd6f8d..c5c7783c4f1 100644 --- a/vendor/sigs.k8s.io/kustomize/api/internal/localizer/localizer.go +++ b/vendor/sigs.k8s.io/kustomize/api/internal/localizer/localizer.go @@ -381,7 +381,7 @@ func (lc *localizer) localizeFileWithContent(path string, content []byte) (strin // 2. avoid paths that temporarily traverse outside the current root, // i.e. ../../../scope/target/current-root. The localized file will be surrounded by // different directories than its source, and so an uncleaned path may no longer be valid. 
- locPath = cleanFilePath(lc.fSys, lc.root, path) + locPath = cleanedRelativePath(lc.fSys, lc.root, path) } absPath := filepath.Join(lc.dst, locPath) if err := lc.fSys.MkdirAll(filepath.Dir(absPath)); err != nil { diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/localizer/locloader.go b/vendor/sigs.k8s.io/kustomize/api/internal/localizer/locloader.go index 1d1072bd146..33a03b4fc45 100644 --- a/vendor/sigs.k8s.io/kustomize/api/internal/localizer/locloader.go +++ b/vendor/sigs.k8s.io/kustomize/api/internal/localizer/locloader.go @@ -89,11 +89,8 @@ func (ll *Loader) Load(path string) ([]byte, error) { if err != nil { return nil, errors.WrapPrefixf(err, "invalid file reference") } - if filepath.IsAbs(path) { - return nil, errors.Errorf("absolute paths not yet supported in alpha: file path %q is absolute", path) - } if !loader.IsRemoteFile(path) && ll.local { - cleanPath := cleanFilePath(ll.fSys, filesys.ConfirmedDir(ll.Root()), path) + cleanPath := cleanedRelativePath(ll.fSys, filesys.ConfirmedDir(ll.Root()), path) cleanAbs := filepath.Join(ll.Root(), cleanPath) dir := filesys.ConfirmedDir(filepath.Dir(cleanAbs)) // target cannot reference newDir, as this load would've failed prior to localize; diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/localizer/util.go b/vendor/sigs.k8s.io/kustomize/api/internal/localizer/util.go index 1a51511188f..1ec077875c6 100644 --- a/vendor/sigs.k8s.io/kustomize/api/internal/localizer/util.go +++ b/vendor/sigs.k8s.io/kustomize/api/internal/localizer/util.go @@ -112,9 +112,13 @@ func hasRef(repoURL string) bool { return repoSpec.Ref != "" } -// cleanFilePath returns file cleaned, where file is a relative path to root on fSys -func cleanFilePath(fSys filesys.FileSystem, root filesys.ConfirmedDir, file string) string { - abs := root.Join(file) +// cleanedRelativePath returns a cleaned relative path of file to root on fSys +func cleanedRelativePath(fSys filesys.FileSystem, root filesys.ConfirmedDir, file string) string { + abs := file + if !filepath.IsAbs(file) { + abs = root.Join(file) + } + dir, f, err := fSys.CleanedAbs(abs) if err != nil { log.Fatalf("cannot clean validated file path %q: %s", abs, err) diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/transformerconfig.go b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/transformerconfig.go index ab847eb6ecf..c539c290d5b 100644 --- a/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/transformerconfig.go +++ b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/transformerconfig.go @@ -21,6 +21,7 @@ type TransformerConfig struct { NameSuffix types.FsSlice `json:"nameSuffix,omitempty" yaml:"nameSuffix,omitempty"` NameSpace types.FsSlice `json:"namespace,omitempty" yaml:"namespace,omitempty"` CommonLabels types.FsSlice `json:"commonLabels,omitempty" yaml:"commonLabels,omitempty"` + Labels types.FsSlice `json:"labels,omitempty" yaml:"labels,omitempty"` TemplateLabels types.FsSlice `json:"templateLabels,omitempty" yaml:"templateLabels,omitempty"` CommonAnnotations types.FsSlice `json:"commonAnnotations,omitempty" yaml:"commonAnnotations,omitempty"` NameReference nbrSlice `json:"nameReference,omitempty" yaml:"nameReference,omitempty"` @@ -41,6 +42,7 @@ func (t *TransformerConfig) DeepCopy() *TransformerConfig { NameSuffix: t.NameSuffix.DeepCopy(), NameSpace: t.NameSpace.DeepCopy(), CommonLabels: t.CommonLabels.DeepCopy(), + Labels: t.Labels.DeepCopy(), TemplateLabels: t.TemplateLabels.DeepCopy(), CommonAnnotations: 
t.CommonAnnotations.DeepCopy(), NameReference: t.NameReference.DeepCopy(), @@ -94,6 +96,7 @@ func (t *TransformerConfig) sortFields() { sort.Sort(t.NameSuffix) sort.Sort(t.NameSpace) sort.Sort(t.CommonLabels) + sort.Sort(t.Labels) sort.Sort(t.TemplateLabels) sort.Sort(t.CommonAnnotations) sort.Sort(t.NameReference) @@ -114,12 +117,18 @@ func (t *TransformerConfig) AddSuffixFieldSpec(fs types.FieldSpec) (err error) { return err } -// AddLabelFieldSpec adds a FieldSpec to CommonLabels -func (t *TransformerConfig) AddLabelFieldSpec(fs types.FieldSpec) (err error) { +// AddCommonLabelsFieldSpec adds a FieldSpec to CommonLabels +func (t *TransformerConfig) AddCommonLabelsFieldSpec(fs types.FieldSpec) (err error) { t.CommonLabels, err = t.CommonLabels.MergeOne(fs) return err } +// AddLabelsFieldSpec adds a FieldSpec to Labels +func (t *TransformerConfig) AddLabelsFieldSpec(fs types.FieldSpec) (err error) { + t.Labels, err = t.Labels.MergeOne(fs) + return err //nolint:wrapcheck +} + // AddAnnotationFieldSpec adds a FieldSpec to CommonAnnotations func (t *TransformerConfig) AddAnnotationFieldSpec(fs types.FieldSpec) (err error) { t.CommonAnnotations, err = t.CommonAnnotations.MergeOne(fs) @@ -162,6 +171,10 @@ func (t *TransformerConfig) Merge(input *TransformerConfig) ( if err != nil { return nil, errors.WrapPrefixf(err, "failed to merge CommonLabels fieldSpec") } + merged.Labels, err = t.Labels.MergeAll(input.Labels) + if err != nil { + return nil, errors.WrapPrefixf(err, "failed to merge Labels fieldSpec") + } merged.TemplateLabels, err = t.TemplateLabels.MergeAll(input.TemplateLabels) if err != nil { return nil, errors.WrapPrefixf(err, "failed to merge TemplateLabels fieldSpec") diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/plugins/loader/load_go_plugin_disabled.go b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/loader/load_go_plugin_disabled.go index 654e22da36b..5531b7967f3 100644 --- a/vendor/sigs.k8s.io/kustomize/api/internal/plugins/loader/load_go_plugin_disabled.go +++ b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/loader/load_go_plugin_disabled.go @@ -14,11 +14,12 @@ package loader import ( + "fmt" + "sigs.k8s.io/kustomize/api/resmap" - "sigs.k8s.io/kustomize/kyaml/errors" "sigs.k8s.io/kustomize/kyaml/resid" ) func (l *Loader) loadGoPlugin(_ resid.ResId, _ string) (resmap.Configurable, error) { - return nil, errors.New("plugin load is disabled") + return nil, fmt.Errorf("plugin load is disabled") } diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/target/kusttarget.go b/vendor/sigs.k8s.io/kustomize/api/internal/target/kusttarget.go index 44c82bc5973..5f1d1095a17 100644 --- a/vendor/sigs.k8s.io/kustomize/api/internal/target/kusttarget.go +++ b/vendor/sigs.k8s.io/kustomize/api/internal/target/kusttarget.go @@ -449,9 +449,6 @@ func (kt *KustTarget) accumulateResources( ra, err = kt.accumulateDirectory(ra, ldr, false) } if err != nil { - if kusterr.IsMalformedYAMLError(errF) { // Some error occurred while tyring to decode YAML file - return nil, errF - } return nil, errors.WrapPrefixf( err, "accumulation err='%s'", errF.Error()) } @@ -460,7 +457,7 @@ func (kt *KustTarget) accumulateResources( return ra, nil } -// accumulateResources fills the given resourceAccumulator +// accumulateComponents fills the given resourceAccumulator // with resources read from the given list of paths. 
func (kt *KustTarget) accumulateComponents( ra *accumulator.ResAccumulator, paths []string) (*accumulator.ResAccumulator, error) { diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/target/kusttarget_configplugin.go b/vendor/sigs.k8s.io/kustomize/api/internal/target/kusttarget_configplugin.go index b589961e5fc..1ba028a36fe 100644 --- a/vendor/sigs.k8s.io/kustomize/api/internal/target/kusttarget_configplugin.go +++ b/vendor/sigs.k8s.io/kustomize/api/internal/target/kusttarget_configplugin.go @@ -275,13 +275,25 @@ var transformerConfigurators = map[builtinhelpers.BuiltinPluginType]func( if len(kt.kustomization.Labels) == 0 && len(kt.kustomization.CommonLabels) == 0 { return } + + type labelStruct struct { + Labels map[string]string + FieldSpecs []types.FieldSpec + } + for _, label := range kt.kustomization.Labels { - var c struct { - Labels map[string]string - FieldSpecs []types.FieldSpec - } + var c labelStruct + c.Labels = label.Pairs fss := types.FsSlice(label.FieldSpecs) + + // merge labels specified in the label section of transformer configs + // these apply to selectors and templates + fss, err := fss.MergeAll(tc.Labels) + if err != nil { + return nil, fmt.Errorf("failed to merge labels: %w", err) + } + // merge the custom fieldSpecs with the default if label.IncludeSelectors { fss, err = fss.MergeAll(tc.CommonLabels) @@ -297,7 +309,7 @@ var transformerConfigurators = map[builtinhelpers.BuiltinPluginType]func( fss, err = fss.MergeOne(types.FieldSpec{Path: "metadata/labels", CreateIfNotPresent: true}) } if err != nil { - return nil, err + return nil, fmt.Errorf("failed to merge labels: %w", err) } c.FieldSpecs = fss p := f() @@ -307,10 +319,9 @@ var transformerConfigurators = map[builtinhelpers.BuiltinPluginType]func( } result = append(result, p) } - var c struct { - Labels map[string]string - FieldSpecs []types.FieldSpec - } + + var c labelStruct + c.Labels = kt.kustomization.CommonLabels c.FieldSpecs = tc.CommonLabels p := f() diff --git a/vendor/sigs.k8s.io/kustomize/api/resource/factory.go b/vendor/sigs.k8s.io/kustomize/api/resource/factory.go index f67fbf36071..fef2f6e499b 100644 --- a/vendor/sigs.k8s.io/kustomize/api/resource/factory.go +++ b/vendor/sigs.k8s.io/kustomize/api/resource/factory.go @@ -41,28 +41,26 @@ func (rf *Factory) Hasher() ifc.KustHasher { } // FromMap returns a new instance of Resource. -func (rf *Factory) FromMap(m map[string]interface{}) *Resource { +func (rf *Factory) FromMap(m map[string]interface{}) (*Resource, error) { res, err := rf.FromMapAndOption(m, nil) if err != nil { - // TODO: return err instead of log. - log.Fatalf("failed to create resource from map: %v", err) + return nil, fmt.Errorf("failed to create resource from map: %w", err) } - return res + return res, nil } // FromMapWithName returns a new instance with the given "original" name. -func (rf *Factory) FromMapWithName(n string, m map[string]interface{}) *Resource { +func (rf *Factory) FromMapWithName(n string, m map[string]interface{}) (*Resource, error) { return rf.FromMapWithNamespaceAndName(resid.DefaultNamespace, n, m) } // FromMapWithNamespaceAndName returns a new instance with the given "original" namespace. -func (rf *Factory) FromMapWithNamespaceAndName(ns string, n string, m map[string]interface{}) *Resource { +func (rf *Factory) FromMapWithNamespaceAndName(ns string, n string, m map[string]interface{}) (*Resource, error) { r, err := rf.FromMapAndOption(m, nil) if err != nil { - // TODO: return err instead of log. 
- log.Fatalf("failed to create resource from map: %v", err) + return nil, fmt.Errorf("failed to create resource from map: %w", err) } - return r.setPreviousId(ns, n, r.GetKind()) + return r.setPreviousId(ns, n, r.GetKind()), nil } // FromMapAndOption returns a new instance of Resource with given options. diff --git a/vendor/sigs.k8s.io/kustomize/api/types/kustomization.go b/vendor/sigs.k8s.io/kustomize/api/types/kustomization.go index f86ec0b9e27..5fe32ebcf7e 100644 --- a/vendor/sigs.k8s.io/kustomize/api/types/kustomization.go +++ b/vendor/sigs.k8s.io/kustomize/api/types/kustomization.go @@ -55,6 +55,7 @@ type Kustomization struct { // Namespace to add to all objects. Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"` + // Deprecated: Use the Labels field instead, which provides a superset of the functionality of CommonLabels. // CommonLabels to add to all objects and selectors. CommonLabels map[string]string `json:"commonLabels,omitempty" yaml:"commonLabels,omitempty"` diff --git a/vendor/sigs.k8s.io/kustomize/api/types/labels.go b/vendor/sigs.k8s.io/kustomize/api/types/labels.go index 05ba890f9d2..35f7fb2a5b0 100644 --- a/vendor/sigs.k8s.io/kustomize/api/types/labels.go +++ b/vendor/sigs.k8s.io/kustomize/api/types/labels.go @@ -6,12 +6,12 @@ package types type Label struct { // Pairs contains the key-value pairs for labels to add Pairs map[string]string `json:"pairs,omitempty" yaml:"pairs,omitempty"` - // IncludeSelectors inidicates should transformer include the + // IncludeSelectors indicates whether the transformer should include the // fieldSpecs for selectors. Custom fieldSpecs specified by // FieldSpecs will be merged with builtin fieldSpecs if this // is true. IncludeSelectors bool `json:"includeSelectors,omitempty" yaml:"includeSelectors,omitempty"` - // IncludeTemplates inidicates should transformer include the + // IncludeTemplates indicates whether the transformer should include the // spec/template/metadata fieldSpec. Custom fieldSpecs specified by // FieldSpecs will be merged with spec/template/metadata fieldSpec if this // is true. If IncludeSelectors is true, IncludeTemplates is not needed. diff --git a/vendor/sigs.k8s.io/kustomize/cmd/config/internal/commands/grep.go b/vendor/sigs.k8s.io/kustomize/cmd/config/internal/commands/grep.go index 54659f5100b..9e2f4b47de2 100644 --- a/vendor/sigs.k8s.io/kustomize/cmd/config/internal/commands/grep.go +++ b/vendor/sigs.k8s.io/kustomize/cmd/config/internal/commands/grep.go @@ -54,6 +54,9 @@ type GrepRunner struct { } func (r *GrepRunner) preRunE(c *cobra.Command, args []string) error { + if len(args) == 0 { + return fmt.Errorf("missing required argument: QUERY") + } r.GrepFilter.Compare = func(a, b string) (int, error) { qa, err := resource.ParseQuantity(a) if err != nil { diff --git a/vendor/sigs.k8s.io/kustomize/cmd/config/internal/commands/run-fns.go b/vendor/sigs.k8s.io/kustomize/cmd/config/internal/commands/run-fns.go index ee94468817c..bcf98768306 100644 --- a/vendor/sigs.k8s.io/kustomize/cmd/config/internal/commands/run-fns.go +++ b/vendor/sigs.k8s.io/kustomize/cmd/config/internal/commands/run-fns.go @@ -20,7 +20,7 @@ import ( "sigs.k8s.io/kustomize/cmd/config/internal/generateddocs/commands" ) -// GetCatRunner returns a RunFnRunner. +// GetRunFnRunner returns a RunFnRunner. 
func GetRunFnRunner(name string) *RunFnRunner { r := &RunFnRunner{} c := &cobra.Command{ diff --git a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/create/create.go b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/create/create.go index 81a05dbab0e..6b867c9089e 100644 --- a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/create/create.go +++ b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/create/create.go @@ -7,6 +7,7 @@ import ( "fmt" "os" "path/filepath" + "slices" "strings" "github.com/spf13/cobra" @@ -113,7 +114,7 @@ func runCreate(opts createFlags, fSys filesys.FileSystem, rf *resource.Factory) return err } for _, resource := range detected { - if kustfile.StringInSlice(resource, resources) { + if slices.Contains(resources, resource) { continue } resources = append(resources, resource) diff --git a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/add/addbase.go b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/add/addbase.go index 01a422e5a81..b33f128df22 100644 --- a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/add/addbase.go +++ b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/add/addbase.go @@ -8,6 +8,8 @@ import ( "fmt" "strings" + "slices" + "github.com/spf13/cobra" "sigs.k8s.io/kustomize/kustomize/v5/commands/internal/kustfile" "sigs.k8s.io/kustomize/kyaml/filesys" @@ -64,7 +66,7 @@ func (o *addBaseOptions) RunAddBase(fSys filesys.FileSystem) error { if !fSys.Exists(path) { return errors.New(path + " does not exist") } - if kustfile.StringInSlice(path, m.Resources) { + if slices.Contains(m.Resources, path) { return fmt.Errorf("base %s already in kustomization file", path) } m.Resources = append(m.Resources, path) diff --git a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/add/addbuildmetadata.go b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/add/addbuildmetadata.go index e4aa86ca336..d8af378424c 100644 --- a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/add/addbuildmetadata.go +++ b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/add/addbuildmetadata.go @@ -5,6 +5,7 @@ package add import ( "fmt" + "slices" "github.com/spf13/cobra" "sigs.k8s.io/kustomize/kustomize/v5/commands/internal/kustfile" @@ -60,7 +61,7 @@ func (o *addBuildMetadataOptions) RunAddBuildMetadata(fSys filesys.FileSystem) e return err } for _, opt := range o.buildMetadataOptions { - if kustfile.StringInSlice(opt, m.BuildMetadata) { + if slices.Contains(m.BuildMetadata, opt) { return fmt.Errorf("buildMetadata option %s already in kustomization file", opt) } m.BuildMetadata = append(m.BuildMetadata, opt) diff --git a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/add/addcomponent.go b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/add/addcomponent.go index c0f728f717a..a5078ae73da 100644 --- a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/add/addcomponent.go +++ b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/add/addcomponent.go @@ -6,6 +6,7 @@ package add import ( "errors" "log" + "slices" "github.com/spf13/cobra" "sigs.k8s.io/kustomize/api/pkg/loader" @@ -69,7 +70,7 @@ func (o *addComponentOptions) RunAddComponent(fSys filesys.FileSystem) error { for _, component := range components { if mf.GetPath() != component { - if kustfile.StringInSlice(component, m.Components) { + if slices.Contains(m.Components, component) { log.Printf("component %s already in kustomization file", component) continue } diff --git a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/add/addgenerator.go 
b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/add/addgenerator.go index ee475d1b47e..ab1d17230df 100644 --- a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/add/addgenerator.go +++ b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/add/addgenerator.go @@ -6,6 +6,7 @@ package add import ( "errors" "log" + "slices" "github.com/spf13/cobra" "sigs.k8s.io/kustomize/kustomize/v5/commands/internal/kustfile" @@ -62,7 +63,7 @@ func (o *addGeneratorOptions) RunAddGenerator(fSys filesys.FileSystem) error { return err } for _, t := range o.generatorFilePaths { - if kustfile.StringInSlice(t, m.Generators) { + if slices.Contains(m.Generators, t) { log.Printf("generator %s already in kustomization file", t) continue } diff --git a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/add/addmetadata.go b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/add/addmetadata.go index eda017b76e1..93cf91a006b 100644 --- a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/add/addmetadata.go +++ b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/add/addmetadata.go @@ -39,6 +39,7 @@ type addMetadataOptions struct { mapValidator func(map[string]string) error kind kindOfAdd labelsWithoutSelector bool + includeTemplates bool } // newCmdAddAnnotation adds one or more commonAnnotations to the kustomization file. @@ -83,6 +84,9 @@ func newCmdAddLabel(fSys filesys.FileSystem, v func(map[string]string) error) *c cmd.Flags().BoolVar(&o.labelsWithoutSelector, "without-selector", false, "using add labels without selector option", ) + cmd.Flags().BoolVar(&o.includeTemplates, "include-templates", false, + "include labels in templates (requires --without-selector)", + ) return cmd } @@ -112,6 +116,9 @@ func (o *addMetadataOptions) validateAndParse(args []string) error { if len(args) < 1 { return fmt.Errorf("must specify %s", o.kind) } + if !o.labelsWithoutSelector && o.includeTemplates { + return fmt.Errorf("--without-selector flag must be specified for --include-templates to work") + } m, err := util.ConvertSliceToMap(args, o.kind.String()) if err != nil { return err @@ -132,7 +139,11 @@ func (o *addMetadataOptions) addAnnotations(m *types.Kustomization) error { func (o *addMetadataOptions) addLabels(m *types.Kustomization) error { if o.labelsWithoutSelector { - m.Labels = append(m.Labels, types.Label{Pairs: make(map[string]string), IncludeSelectors: false}) + m.Labels = append(m.Labels, types.Label{ + Pairs: make(map[string]string), + IncludeSelectors: false, + IncludeTemplates: o.includeTemplates, + }) return o.writeToMap(m.Labels[len(m.Labels)-1].Pairs, label) } if m.CommonLabels == nil { diff --git a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/add/addresource.go b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/add/addresource.go index 529f2b11948..3c63b4b139d 100644 --- a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/add/addresource.go +++ b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/add/addresource.go @@ -6,6 +6,7 @@ package add import ( "errors" "log" + "slices" "github.com/spf13/cobra" ldrhelper "sigs.k8s.io/kustomize/api/pkg/loader" @@ -73,7 +74,7 @@ func (o *addResourceOptions) RunAddResource(fSys filesys.FileSystem) error { for _, resource := range resources { if mf.GetPath() != resource { - if kustfile.StringInSlice(resource, m.Resources) { + if slices.Contains(m.Resources, resource) { log.Printf("resource %s already in kustomization file", resource) continue } diff --git 
diff --git a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/add/addtransformer.go b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/add/addtransformer.go
index 288ee3f3ed9..ffc4aa77eac 100644
--- a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/add/addtransformer.go
+++ b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/add/addtransformer.go
@@ -6,6 +6,7 @@ package add
 import (
 	"errors"
 	"log"
+	"slices"
 
 	"github.com/spf13/cobra"
 	"sigs.k8s.io/kustomize/kustomize/v5/commands/internal/kustfile"
@@ -62,7 +63,7 @@ func (o *addTransformerOptions) RunAddTransformer(fSys filesys.FileSystem) error
 		return err
 	}
 	for _, t := range o.transformerFilePaths {
-		if kustfile.StringInSlice(t, m.Transformers) {
+		if slices.Contains(m.Transformers, t) {
 			log.Printf("transformer %s already in kustomization file", t)
 			continue
 		}
diff --git a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/fix/convert.go b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/fix/convert.go
index f6c45080304..04162d7c178 100644
--- a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/fix/convert.go
+++ b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/fix/convert.go
@@ -7,6 +7,7 @@ import (
 	"bytes"
 	"fmt"
 	"path"
+	"slices"
 	"strconv"
 	"strings"
 
@@ -55,11 +56,11 @@ func filesTouchedByKustomize(k *types.Kustomization, filepath string, fSys files
 		files, err := fSys.ReadDir(r)
 		if err == nil && len(files) > 0 {
 			for _, file := range files {
-				if !stringInSlice(file, []string{
+				if !slices.Contains([]string{
 					"kustomization.yaml",
 					"kustomization.yml",
 					"Kustomization",
-				}) {
+				}, file) {
 					continue
 				}
 
@@ -223,7 +224,7 @@ func constructFieldOptions(value string, varString string) ([]*types.FieldOption
 			return nil, fmt.Errorf("cannot convert all vars to replacements; %s is not delimited", varString)
 		}
 		delimiter = pre
-		index = indexOf(varString, strings.Split(value, delimiter))
+		index = slices.Index(strings.Split(value, delimiter), varString)
 		if index == -1 { // this should never happen
 			return nil, fmt.Errorf("internal error: could not get index of var %s", varString)
 		}
@@ -327,21 +328,3 @@ func setPlaceholderValue(varName string, files []string, fSys filesys.FileSystem
 		}
 	}
 	return nil
 }
-
-func stringInSlice(elem string, slice []string) bool {
-	for i := range slice {
-		if slice[i] == elem {
-			return true
-		}
-	}
-	return false
-}
-
-func indexOf(varName string, slice []string) int {
-	for i := range slice {
-		if slice[i] == varName {
-			return i
-		}
-	}
-	return -1
-}
diff --git a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/remove/removebuildmetadata.go b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/remove/removebuildmetadata.go
index de27de0a184..58e30488ce0 100644
--- a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/remove/removebuildmetadata.go
+++ b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/remove/removebuildmetadata.go
@@ -4,6 +4,8 @@ package remove
 
 import (
+	"slices"
+
 	"github.com/spf13/cobra"
 	"sigs.k8s.io/kustomize/kustomize/v5/commands/internal/kustfile"
 	"sigs.k8s.io/kustomize/kustomize/v5/commands/internal/util"
@@ -59,7 +61,7 @@ func (o *removeBuildMetadataOptions) RunRemoveBuildMetadata(fSys filesys.FileSys
 	}
 	var newOptions []string
 	for _, opt := range m.BuildMetadata {
-		if !kustfile.StringInSlice(opt, o.buildMetadataOptions) {
+		if !slices.Contains(o.buildMetadataOptions, opt) {
 			newOptions = append(newOptions, opt)
 		}
 	}
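Two related stdlib swaps appear above: slices.Index replaces the local indexOf helper in convert.go, and the remove commands keep only entries that are not in the removal list via !slices.Contains. A short sketch of both (sample values are illustrative):

package main

import (
	"fmt"
	"slices"
	"strings"
)

func main() {
	// First-match lookup, returns -1 when the element is absent.
	parts := strings.Split("name.suffix", ".")
	fmt.Println(slices.Index(parts, "suffix")) // 1

	// Order-preserving filter: keep everything not marked for removal.
	toRemove := []string{"managedByLabel"}
	var kept []string
	for _, opt := range []string{"originAnnotations", "managedByLabel"} {
		if !slices.Contains(toRemove, opt) {
			kept = append(kept, opt)
		}
	}
	fmt.Println(kept) // [originAnnotations]
}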
diff --git a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/remove/removeconfigmap.go b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/remove/removeconfigmap.go
index 7eeedb7d0f9..056db08e9e9 100644
--- a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/remove/removeconfigmap.go
+++ b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/remove/removeconfigmap.go
@@ -7,6 +7,7 @@ import (
 	"errors"
 	"fmt"
 	"log"
+	"slices"
 	"strings"
 
 	"github.com/spf13/cobra"
@@ -88,7 +89,7 @@ func (o *removeConfigMapOptions) RunRemoveConfigMap(fSys filesys.FileSystem) err
 	remainingConfigMaps := make([]types.ConfigMapArgs, 0, len(m.ConfigMapGenerator))
 
 	for _, currentConfigMap := range m.ConfigMapGenerator {
-		if kustfile.StringInSlice(currentConfigMap.Name, o.configMapNamesToRemove) &&
+		if slices.Contains(o.configMapNamesToRemove, currentConfigMap.Name) &&
 			util.NamespaceEqual(currentConfigMap.Namespace, o.namespace) {
 			foundConfigMaps[currentConfigMap.Name] = struct{}{}
 			continue
diff --git a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/remove/removemetadata.go b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/remove/removemetadata.go
index 3c34916fcf6..8d04062c55d 100644
--- a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/remove/removemetadata.go
+++ b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/remove/removemetadata.go
@@ -40,7 +40,7 @@ type removeMetadataOptions struct {
 	kind kindOfAdd
 }
 
-// newCmdRemoveLabel removes one or more commonAnnotations from the kustomization file.
+// newCmdRemoveAnnotation removes one or more commonAnnotations from the kustomization file.
 func newCmdRemoveAnnotation(fSys filesys.FileSystem, v func([]string) error) *cobra.Command {
 	var o removeMetadataOptions
 	o.kind = label
diff --git a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/remove/removeresource.go b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/remove/removeresource.go
index fc76943d009..1fdbf79be12 100644
--- a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/remove/removeresource.go
+++ b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/remove/removeresource.go
@@ -6,6 +6,7 @@ package remove
 import (
 	"errors"
 	"path/filepath"
+	"slices"
 
 	"github.com/spf13/cobra"
 	"sigs.k8s.io/kustomize/api/konfig"
@@ -73,7 +74,7 @@ func (o *removeResourceOptions) RunRemoveResource(fSys filesys.FileSystem) error
 
 	newResources := make([]string, 0, len(m.Resources))
 	for _, resource := range m.Resources {
-		if kustfile.StringInSlice(resource, resources) {
+		if slices.Contains(resources, resource) {
 			continue
 		}
 		newResources = append(newResources, resource)
diff --git a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/remove/removesecret.go b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/remove/removesecret.go
index 89a8af23067..d648051ed51 100644
--- a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/remove/removesecret.go
+++ b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/remove/removesecret.go
@@ -7,6 +7,7 @@ import (
 	"errors"
 	"fmt"
 	"log"
+	"slices"
 	"strings"
 
 	"github.com/spf13/cobra"
@@ -89,7 +90,7 @@ func (o *removeSecretOptions) RunRemoveSecret(fSys filesys.FileSystem) error {
 	remainingSecrets := make([]types.SecretArgs, 0, len(m.SecretGenerator))
 
 	for _, currentSecret := range m.SecretGenerator {
-		if kustfile.StringInSlice(currentSecret.Name, o.secretNamesToRemove) &&
+		if slices.Contains(o.secretNamesToRemove, currentSecret.Name) &&
 			util.NamespaceEqual(currentSecret.Namespace, o.namespace) {
 			foundSecrets[currentSecret.Name] = struct{}{}
 			continue
diff --git a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/remove/removetransformer.go b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/remove/removetransformer.go
index c8e188cecb0..7de87256f9e 100644
--- a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/remove/removetransformer.go
+++ b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/edit/remove/removetransformer.go
@@ -5,6 +5,7 @@ package remove
 import (
 	"errors"
+	"slices"
 
 	"github.com/spf13/cobra"
 	"sigs.k8s.io/kustomize/api/konfig"
@@ -72,7 +73,7 @@ func (o *removeTransformerOptions) RunRemoveTransformer(fSys filesys.FileSystem)
 
 	newTransformers := make([]string, 0, len(m.Transformers))
 	for _, transformer := range m.Transformers {
-		if kustfile.StringInSlice(transformer, transformers) {
+		if slices.Contains(transformers, transformer) {
 			continue
 		}
 		newTransformers = append(newTransformers, transformer)
diff --git a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/internal/kustfile/kustomizationfile.go b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/internal/kustfile/kustomizationfile.go
index 4804b569199..480eac473af 100644
--- a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/internal/kustfile/kustomizationfile.go
+++ b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/internal/kustfile/kustomizationfile.go
@@ -40,6 +40,7 @@ func determineFieldOrder() []string {
 	ordered := []string{
 		"MetaData",
+		"SortOptions",
 		"Resources",
 		"Bases",
 		"NamePrefix",
@@ -65,6 +66,7 @@
 		"Configurations",
 		"Generators",
 		"Transformers",
+		"Validators",
 		"Components",
 		"OpenAPI",
 		"BuildMetadata",
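determineFieldOrder above gains the SortOptions and Validators entries; the returned list drives the order in which kustomization fields are written back. An illustrative sketch (not the kustomize implementation) of how such a fixed priority list can drive stable ordering:

package main

import (
	"fmt"
	"slices"
	"sort"
)

func main() {
	// Toy priority list; the real one covers every kustomization field.
	order := []string{"MetaData", "SortOptions", "Resources", "Transformers", "Validators"}
	fields := []string{"Validators", "Resources", "MetaData"}

	// Sort fields by their position in the priority list.
	sort.SliceStable(fields, func(i, j int) bool {
		return slices.Index(order, fields[i]) < slices.Index(order, fields[j])
	})
	fmt.Println(fields) // [MetaData Resources Validators]
}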
@@ -189,16 +191,6 @@ func (mf *kustomizationFile) Write(kustomization *types.Kustomization) error {
 	return mf.fSys.WriteFile(mf.path, data)
 }
 
-// StringInSlice returns true if the string is in the slice.
-func StringInSlice(str string, list []string) bool {
-	for _, v := range list {
-		if v == str {
-			return true
-		}
-	}
-	return false
-}
-
 func (mf *kustomizationFile) parseCommentedFields(content []byte) error {
 	buffer := bytes.NewBuffer(content)
 	var comments [][]byte
diff --git a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/internal/util/validate.go b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/internal/util/validate.go
index bf041b50f12..0fde88f784a 100644
--- a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/internal/util/validate.go
+++ b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/internal/util/validate.go
@@ -6,10 +6,10 @@ package util
 import (
 	"errors"
 	"fmt"
+	"slices"
 	"strings"
 
 	"sigs.k8s.io/kustomize/api/types"
-	"sigs.k8s.io/kustomize/kustomize/v5/commands/internal/kustfile"
 )
 
 type BuildMetadataValidator struct{}
@@ -23,7 +23,7 @@ func (b *BuildMetadataValidator) Validate(args []string) ([]string, error) {
 	}
 	opts := strings.Split(args[0], ",")
 	for _, opt := range opts {
-		if !kustfile.StringInSlice(opt, types.BuildMetadataOptions) {
+		if !slices.Contains(types.BuildMetadataOptions, opt) {
 			return nil, fmt.Errorf("invalid buildMetadata option: %s", opt)
 		}
 	}
diff --git a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/localize/localize.go b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/localize/localize.go
index d557ebceb02..5aaa9bb41d5 100644
--- a/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/localize/localize.go
+++ b/vendor/sigs.k8s.io/kustomize/kustomize/v5/commands/localize/localize.go
@@ -4,10 +4,14 @@ package localize
 import (
+	"bytes"
 	"log"
+	"path/filepath"
 
 	"github.com/spf13/cobra"
 	lclzr "sigs.k8s.io/kustomize/api/krusty/localizer"
+	"sigs.k8s.io/kustomize/kustomize/v5/commands/build"
+	"sigs.k8s.io/kustomize/kyaml/copyutil"
 	"sigs.k8s.io/kustomize/kyaml/errors"
 	"sigs.k8s.io/kustomize/kyaml/filesys"
 )
@@ -20,23 +24,26 @@ type arguments struct {
 }
 
 type flags struct {
-	scope string
+	scope    string
+	noVerify bool
 }
 
 // NewCmdLocalize returns a new localize command.
 func NewCmdLocalize(fs filesys.FileSystem) *cobra.Command {
 	var f flags
+	var buildBuffer bytes.Buffer
+	buildCmd := build.NewCmdBuild(fs, &build.Help{}, &buildBuffer)
 	cmd := &cobra.Command{
 		Use:   "localize [target [destination]]",
 		Short: "[Alpha] Creates localized copy of target kustomization root at destination",
-		Long: `[Alpha] Creates copy of target kustomization directory or
-versioned URL at destination, where remote references in the original
+		Long: `[Alpha] Creates copy of target kustomization directory or
+versioned URL at destination, where remote references in the original
 are replaced by local references to the downloaded remote content.
 
-If target is not specified, the current working directory will be used.
-Destination is a path to a new directory in an existing directory. If
-destination is not specified, a new directory will be created in the current
-working directory.
+If target is not specified, the current working directory will be used.
+Destination is a path to a new directory in an existing directory. If
+destination is not specified, a new directory will be created in the current
+working directory.
 
 For details, see: https://kubectl.docs.kubernetes.io/references/kustomize/cmd/
 
@@ -46,7 +53,7 @@ alphabetizes kustomization fields in the localized copy.
 `,
 		Example: `
 # Localize the current working directory, with default scope and destination
-kustomize localize
+kustomize localize
 
 # Localize some local directory, with scope and default destination
 kustomize localize /home/path/scope/target --scope /home/path/scope
 
 # Localize remote at set destination
 kustomize localize https://github.com/kubernetes-sigs/kustomize//api/krusty/test
 			if err != nil {
 				return errors.Wrap(err)
 			}
+
+			if !f.noVerify {
+				originalBuild, err := runBuildCmd(buildBuffer, buildCmd, args.target)
+				if err != nil {
+					return errors.Wrap(err)
+				}
+
+				buildDst := dst
+				if f.scope != "" && f.scope != args.target {
+					buildDst = filepath.Join(dst, filepath.Base(args.target))
+				}
+
+				localizedBuild, err := runBuildCmd(buildBuffer, buildCmd, buildDst)
+				if err != nil {
+					return errors.Wrap(err)
+				}
+
+				if localizedBuild != originalBuild {
+					copyutil.PrettyFileDiff(originalBuild, localizedBuild)
+					log.Fatalf("VERIFICATION FAILED: `kustomize build` for %s and %s are different after localization.\n", args.target, dst)
+				}
+				log.Printf("VERIFICATION SUCCESS: `kustomize build` for %s and %s are the same after localization.\n", args.target, dst)
+			}
+			log.Printf("SUCCESS: localized %q to directory %s\n", args.target, dst)
 			return nil
 		},
@@ -74,6 +105,12 @@ kustomize localize https://github.com/kubernetes-sigs/kustomize//api/krusty/test
 Cannot specify for remote targets, as scope is by default the containing repo.
 If not specified for local target, scope defaults to target.
 `)
+	cmd.Flags().BoolVar(&f.noVerify,
+		"no-verify",
+		false,
+		`Does not verify that the outputs of kustomize build for target and newDir are the same after localization.
+	If not specified, this flag defaults to false and will run kustomize build.
+	`)
 	return cmd
 }
 
@@ -92,3 +129,16 @@ func matchArgs(rawArgs []string) arguments {
 	}
 	return args
 }
+
+func runBuildCmd(buffer bytes.Buffer, cmd *cobra.Command, folder string) (buildOutput string, err error) {
+	buffer.Reset()
+	buildErr := cmd.RunE(cmd, []string{folder})
+	if buildErr != nil {
+		log.Printf("If your target directory requires flags to build: \n"+
+			"1. Add executable permissions for the downloaded exec binaries in '%s'. \n"+
+			"2. Run kustomize build with the necessary flags and self-verify the outputs.", folder)
+		return "", errors.Wrap(buildErr)
+	}
+
+	return buffer.String(), nil
+}
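The localize.go change above adds post-localization verification: it renders the original target and the localized copy with the build command and compares the two outputs unless --no-verify is set. A hedged, out-of-tree sketch of the same idea; it shells out to a kustomize binary on PATH instead of reusing the in-process build command, and the directory names are assumptions:

package main

import (
	"fmt"
	"log"
	"os/exec"
)

// render runs `kustomize build` on a directory and returns its YAML output.
func render(dir string) (string, error) {
	out, err := exec.Command("kustomize", "build", dir).Output()
	return string(out), err
}

func main() {
	original, err := render("./original")
	if err != nil {
		log.Fatal(err)
	}
	localized, err := render("./localized")
	if err != nil {
		log.Fatal(err)
	}
	if original == localized {
		fmt.Println("VERIFICATION SUCCESS: outputs match")
	} else {
		fmt.Println("VERIFICATION FAILED: outputs differ")
	}
}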
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/copyutil/copyutil.go b/vendor/sigs.k8s.io/kustomize/kyaml/copyutil/copyutil.go
new file mode 100644
index 00000000000..98cb8314d86
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/copyutil/copyutil.go
@@ -0,0 +1,197 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+// The copyutil package contains libraries for copying directories of configuration.
+package copyutil
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/sergi/go-diff/diffmatchpatch"
+	"sigs.k8s.io/kustomize/kyaml/errors"
+	"sigs.k8s.io/kustomize/kyaml/filesys"
+	"sigs.k8s.io/kustomize/kyaml/sets"
+)
+
+// CopyDir copies a src directory to a dst directory. CopyDir skips copying the .git directory from the src.
+func CopyDir(fSys filesys.FileSystem, src string, dst string) error {
+	return errors.Wrap(fSys.Walk(src, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		// don't copy the .git dir
+		if path != src {
+			rel := strings.TrimPrefix(path, src)
+			if IsDotGitFolder(rel) {
+				return nil
+			}
+		}
+
+		// path is an absolute path, rather than a path relative to src.
+		// e.g. if src is /path/to/package, then path might be /path/to/package/and/sub/dir
+		// we need the path relative to src `and/sub/dir` when we are copying the files to dest.
+		copyTo := strings.TrimPrefix(path, src)
+
+		// make directories that don't exist
+		if info.IsDir() {
+			return errors.Wrap(fSys.MkdirAll(filepath.Join(dst, copyTo)))
+		}
+
+		// copy file by reading and writing it
+		b, err := fSys.ReadFile(filepath.Join(src, copyTo))
+		if err != nil {
+			return errors.Wrap(err)
+		}
+		err = fSys.WriteFile(filepath.Join(dst, copyTo), b)
+		if err != nil {
+			return errors.Wrap(err)
+		}
+
+		return nil
+	}))
+}
+
+// Diff returns a list of files that differ between the source and destination.
+//
+// Diff is guaranteed to return a non-empty set if any files differ, but
+// this set is not guaranteed to contain all differing files.
+func Diff(sourceDir, destDir string) (sets.String, error) {
+	// get set of filenames in the package source
+	upstreamFiles := sets.String{}
+	err := filepath.Walk(sourceDir, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		// skip git repo if it exists
+		if IsDotGitFolder(path) {
+			return nil
+		}
+
+		upstreamFiles.Insert(strings.TrimPrefix(strings.TrimPrefix(path, sourceDir), string(filepath.Separator)))
+		return nil
+	})
+	if err != nil {
+		return sets.String{}, err
+	}
+
+	// get set of filenames in the cloned package
+	localFiles := sets.String{}
+	err = filepath.Walk(destDir, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		// skip git repo if it exists
+		if IsDotGitFolder(path) {
+			return nil
+		}
+
+		localFiles.Insert(strings.TrimPrefix(strings.TrimPrefix(path, destDir), string(filepath.Separator)))
+		return nil
+	})
+	if err != nil {
+		return sets.String{}, err
+	}
+
+	// verify the source and cloned packages have the same set of filenames
+	diff := upstreamFiles.SymmetricDifference(localFiles)
+
+	// verify file contents match
+	for _, f := range upstreamFiles.Intersection(localFiles).List() {
+		fi, err := os.Stat(filepath.Join(destDir, f))
+		if err != nil {
+			return diff, err
+		}
+		if fi.Mode().IsDir() {
+			// already checked that this directory exists in the local files
+			continue
+		}
+
+		// compare upstreamFiles
+		b1, err := os.ReadFile(filepath.Join(destDir, f))
+		if err != nil {
+			return diff, err
+		}
+		b2, err := os.ReadFile(filepath.Join(sourceDir, f))
+		if err != nil {
+			return diff, err
+		}
+		if !bytes.Equal(b1, b2) {
+			fmt.Println(PrettyFileDiff(string(b1), string(b2)))
+			diff.Insert(f)
+		}
+	}
+	// return the differing files
+	return diff, nil
+}
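copyutil.Diff above compares two directory trees by collecting relative paths into sets and then byte-comparing the files present in both. A small sketch of the same idea using only the standard library (plain maps instead of kyaml's sets.String; the directory names are assumptions):

package main

import (
	"bytes"
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
)

// listFiles returns the set of file paths under root, relative to root.
func listFiles(root string) (map[string]bool, error) {
	files := map[string]bool{}
	err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
		if err != nil || d.IsDir() {
			return err
		}
		rel, relErr := filepath.Rel(root, path)
		if relErr != nil {
			return relErr
		}
		files[rel] = true
		return nil
	})
	return files, err
}

func main() {
	src, _ := listFiles("./src")
	dst, _ := listFiles("./dst")
	for rel := range src {
		if !dst[rel] {
			fmt.Println("only in src:", rel)
			continue
		}
		a, _ := os.ReadFile(filepath.Join("./src", rel))
		b, _ := os.ReadFile(filepath.Join("./dst", rel))
		if !bytes.Equal(a, b) {
			fmt.Println("differs:", rel)
		}
	}
}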
+
+// IsDotGitFolder checks if the provided path is either the .git folder or
+// a file underneath the .git folder.
+func IsDotGitFolder(path string) bool {
+	cleanPath := filepath.ToSlash(filepath.Clean(path))
+	for _, c := range strings.Split(cleanPath, "/") {
+		if c == ".git" {
+			return true
+		}
+	}
+	return false
+}
+
+// PrettyFileDiff takes the content of two files and returns the pretty diff
+func PrettyFileDiff(s1, s2 string) string {
+	dmp := diffmatchpatch.New()
+	wSrc, wDst, warray := dmp.DiffLinesToRunes(s1, s2)
+	diffs := dmp.DiffMainRunes(wSrc, wDst, false)
+	diffs = dmp.DiffCharsToLines(diffs, warray)
+	return dmp.DiffPrettyText(diffs)
+}
+
+// SyncFile copies file from src file path to a dst file path by replacement
+// deletes dst file if src file doesn't exist
+func SyncFile(src, dst string) error {
+	srcFileInfo, err := os.Stat(src)
+	if err != nil {
+		// delete dst if source doesn't exist
+		if err = deleteFile(dst); err != nil {
+			return err
+		}
+		return nil
+	}
+
+	input, err := os.ReadFile(src)
+	if err != nil {
+		return err
+	}
+
+	var filePerm os.FileMode
+
+	// get the destination file perm if file exists
+	dstFileInfo, err := os.Stat(dst)
+	if err != nil {
+		// get source file perm if destination file doesn't exist
+		filePerm = srcFileInfo.Mode().Perm()
+	} else {
+		filePerm = dstFileInfo.Mode().Perm()
+	}
+
+	err = os.WriteFile(dst, input, filePerm)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// deleteFile deletes file from path, returns no error if file doesn't exist
+func deleteFile(path string) error {
+	if _, err := os.Stat(path); err != nil {
+		// return nil if file doesn't exist
+		return nil
+	}
+	return os.Remove(path)
+}
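PrettyFileDiff above uses go-diff's line-mode flow: map lines to runes, diff the rune sequences, then map the result back to lines before rendering. A minimal usage sketch with illustrative input strings:

package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	before := "replicas: 1\nimage: nginx\n"
	after := "replicas: 3\nimage: nginx\n"

	dmp := diffmatchpatch.New()
	// Line-mode diff: lines become runes, the rune diff is computed, then
	// the runes are expanded back into the original lines.
	src, dst, lines := dmp.DiffLinesToRunes(before, after)
	diffs := dmp.DiffMainRunes(src, dst, false)
	diffs = dmp.DiffCharsToLines(diffs, lines)
	fmt.Println(dmp.DiffPrettyText(diffs))
}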
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/container/container.go b/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/container/container.go
index 76b51a8c50c..62aa76f53a7 100644
--- a/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/container/container.go
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/container/container.go
@@ -167,7 +167,7 @@ func (c *Filter) setupExec() error {
 	return nil
 }
 
-// getArgs returns the command + args to run to spawn the container
+// getCommand returns the command + args to run to spawn the container
 func (c *Filter) getCommand() (string, []string) {
 	network := runtimeutil.NetworkNameNone
 	if c.ContainerSpec.Network {
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/kio/ignorefilesmatcher.go b/vendor/sigs.k8s.io/kustomize/kyaml/kio/ignorefilesmatcher.go
index 0ba3d838215..5ef48d7e36d 100644
--- a/vendor/sigs.k8s.io/kustomize/kyaml/kio/ignorefilesmatcher.go
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/kio/ignorefilesmatcher.go
@@ -86,7 +86,7 @@ func (i *ignoreFilesMatcher) matchFile(path string) bool {
 	return i.matchers[len(i.matchers)-1].matcher.Match(path, false)
 }
 
-// matchFile checks whether the directory given by the provided path matches
+// matchDir checks whether the directory given by the provided path matches
 // any of the patterns in the .krmignore file for the package.
 func (i *ignoreFilesMatcher) matchDir(path string) bool {
 	if len(i.matchers) == 0 {
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kustomizationapi/swagger.go b/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kustomizationapi/swagger.go
index 49647be4b9e..421f70f7c2b 100644
--- a/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kustomizationapi/swagger.go
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kustomizationapi/swagger.go
@@ -64,7 +64,7 @@ func (fi bindataFileInfo) Mode() os.FileMode {
 	return fi.mode
 }
 
-// Mode return file modify time
+// ModTime return file modify time
 func (fi bindataFileInfo) ModTime() time.Time {
 	return fi.modTime
 }
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/openapi/openapi.go b/vendor/sigs.k8s.io/kustomize/kyaml/openapi/openapi.go
index 41ee0afc5aa..aa1c8d4a859 100644
--- a/vendor/sigs.k8s.io/kustomize/kyaml/openapi/openapi.go
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/openapi/openapi.go
@@ -182,7 +182,7 @@ type ResourceSchema struct {
 	Schema *spec.Schema
 }
 
-// IsEmpty returns true if the ResourceSchema is empty
+// IsMissingOrNull returns true if the ResourceSchema is missing or null
 func (rs *ResourceSchema) IsMissingOrNull() bool {
 	if rs == nil || rs.Schema == nil {
 		return true
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/alias.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/alias.go
index 12c72ae83db..5908cd7bd2d 100644
--- a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/alias.go
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/alias.go
@@ -20,7 +20,7 @@ const (
 	BareSeqNodeWrappingKey = "bareSeqNodeWrappingKey"
 )
 
-// SeqIndentType holds the indentation style for sequence nodes
+// SequenceIndentStyle holds the indentation style for sequence nodes
 type SequenceIndentStyle string
 
 // EncoderOptions are options that can be used to configure the encoder,